Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull uaccess unification updates from Al Viro:
"This is the uaccess unification pile. It's _not_ the end of uaccess
work, but the next batch of that will go into the next cycle. This one
mostly takes copy_from_user() and friends out of arch/* and gets the
zero-padding behaviour in sync for all architectures.

Dealing with the nocache/writethrough mess is for the next cycle;
fortunately, that's x86-only. Same for cleanups in iov_iter.c (I am
sold on access_ok() in there, BTW; just not in this pile), same for
reducing __copy_... callsites, strn*... stuff, etc. - there will be a
pile about as large as this one in the next merge window.

This one sat in -next for weeks. -3KLoC"

* 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (96 commits)
HAVE_ARCH_HARDENED_USERCOPY is unconditional now
CONFIG_ARCH_HAS_RAW_COPY_USER is unconditional now
m32r: switch to RAW_COPY_USER
hexagon: switch to RAW_COPY_USER
microblaze: switch to RAW_COPY_USER
get rid of padding, switch to RAW_COPY_USER
ia64: get rid of copy_in_user()
ia64: sanitize __access_ok()
ia64: get rid of 'segment' argument of __do_{get,put}_user()
ia64: get rid of 'segment' argument of __{get,put}_user_check()
ia64: add extable.h
powerpc: get rid of zeroing, switch to RAW_COPY_USER
esas2r: don't open-code memdup_user()
alpha: fix stack smashing in old_adjtimex(2)
don't open-code kernel_setsockopt()
mips: switch to RAW_COPY_USER
mips: get rid of tail-zeroing in primitives
mips: make copy_from_user() zero tail explicitly
mips: clean and reorder the forest of macros...
mips: consolidate __invoke_... wrappers
...

+1481 -4358
+55
arch/alpha/include/asm/extable.h
··· 1 + #ifndef _ASM_EXTABLE_H 2 + #define _ASM_EXTABLE_H 3 + 4 + /* 5 + * About the exception table: 6 + * 7 + * - insn is a 32-bit pc-relative offset from the faulting insn. 8 + * - nextinsn is a 16-bit offset off of the faulting instruction 9 + * (not off of the *next* instruction as branches are). 10 + * - errreg is the register in which to place -EFAULT. 11 + * - valreg is the final target register for the load sequence 12 + * and will be zeroed. 13 + * 14 + * Either errreg or valreg may be $31, in which case nothing happens. 15 + * 16 + * The exception fixup information "just so happens" to be arranged 17 + * as in a MEM format instruction. This lets us emit our three 18 + * values like so: 19 + * 20 + * lda valreg, nextinsn(errreg) 21 + * 22 + */ 23 + 24 + struct exception_table_entry 25 + { 26 + signed int insn; 27 + union exception_fixup { 28 + unsigned unit; 29 + struct { 30 + signed int nextinsn : 16; 31 + unsigned int errreg : 5; 32 + unsigned int valreg : 5; 33 + } bits; 34 + } fixup; 35 + }; 36 + 37 + /* Returns the new pc */ 38 + #define fixup_exception(map_reg, _fixup, pc) \ 39 + ({ \ 40 + if ((_fixup)->fixup.bits.valreg != 31) \ 41 + map_reg((_fixup)->fixup.bits.valreg) = 0; \ 42 + if ((_fixup)->fixup.bits.errreg != 31) \ 43 + map_reg((_fixup)->fixup.bits.errreg) = -EFAULT; \ 44 + (pc) + (_fixup)->fixup.bits.nextinsn; \ 45 + }) 46 + 47 + #define ARCH_HAS_RELATIVE_EXTABLE 48 + 49 + #define swap_ex_entry_fixup(a, b, tmp, delta) \ 50 + do { \ 51 + (a)->fixup.unit = (b)->fixup.unit; \ 52 + (b)->fixup.unit = (tmp).fixup.unit; \ 53 + } while (0) 54 + 55 + #endif
+4 -12
arch/alpha/include/asm/futex.h
··· 19 19 "3: .subsection 2\n" \ 20 20 "4: br 1b\n" \ 21 21 " .previous\n" \ 22 - " .section __ex_table,\"a\"\n" \ 23 - " .long 1b-.\n" \ 24 - " lda $31,3b-1b(%1)\n" \ 25 - " .long 2b-.\n" \ 26 - " lda $31,3b-2b(%1)\n" \ 27 - " .previous\n" \ 22 + EXC(1b,3b,%1,$31) \ 23 + EXC(2b,3b,%1,$31) \ 28 24 : "=&r" (oldval), "=&r"(ret) \ 29 25 : "r" (uaddr), "r"(oparg) \ 30 26 : "memory") ··· 97 101 "3: .subsection 2\n" 98 102 "4: br 1b\n" 99 103 " .previous\n" 100 - " .section __ex_table,\"a\"\n" 101 - " .long 1b-.\n" 102 - " lda $31,3b-1b(%0)\n" 103 - " .long 2b-.\n" 104 - " lda $31,3b-2b(%0)\n" 105 - " .previous\n" 104 + EXC(1b,3b,%0,$31) 105 + EXC(2b,3b,%0,$31) 106 106 : "+r"(ret), "=&r"(prev), "=&r"(cmp) 107 107 : "r"(uaddr), "r"((long)(int)oldval), "r"(newval) 108 108 : "memory");
+73 -232
arch/alpha/include/asm/uaccess.h
··· 1 1 #ifndef __ALPHA_UACCESS_H 2 2 #define __ALPHA_UACCESS_H 3 3 4 - #include <linux/errno.h> 5 - #include <linux/sched.h> 6 - 7 - 8 4 /* 9 5 * The fs value determines whether argument validity checking should be 10 6 * performed or not. If get_fs() == USER_DS, checking is performed, with ··· 15 19 16 20 #define KERNEL_DS ((mm_segment_t) { 0UL }) 17 21 #define USER_DS ((mm_segment_t) { -0x40000000000UL }) 18 - 19 - #define VERIFY_READ 0 20 - #define VERIFY_WRITE 1 21 22 22 23 #define get_fs() (current_thread_info()->addr_limit) 23 24 #define get_ds() (KERNEL_DS) ··· 32 39 * - AND "addr+size" doesn't have any high-bits set 33 40 * - OR we are in kernel mode. 34 41 */ 35 - #define __access_ok(addr, size, segment) \ 36 - (((segment).seg & (addr | size | (addr+size))) == 0) 42 + #define __access_ok(addr, size) \ 43 + ((get_fs().seg & (addr | size | (addr+size))) == 0) 37 44 38 - #define access_ok(type, addr, size) \ 39 - ({ \ 40 - __chk_user_ptr(addr); \ 41 - __access_ok(((unsigned long)(addr)), (size), get_fs()); \ 45 + #define access_ok(type, addr, size) \ 46 + ({ \ 47 + __chk_user_ptr(addr); \ 48 + __access_ok(((unsigned long)(addr)), (size)); \ 42 49 }) 43 50 44 51 /* ··· 54 61 * (b) require any knowledge of processes at this stage 55 62 */ 56 63 #define put_user(x, ptr) \ 57 - __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), get_fs()) 64 + __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) 58 65 #define get_user(x, ptr) \ 59 - __get_user_check((x), (ptr), sizeof(*(ptr)), get_fs()) 66 + __get_user_check((x), (ptr), sizeof(*(ptr))) 60 67 61 68 /* 62 69 * The "__xxx" versions do not do address space checking, useful when ··· 74 81 * more extensive comments with fixup_inline_exception below for 75 82 * more information. 
76 83 */ 84 + #define EXC(label,cont,res,err) \ 85 + ".section __ex_table,\"a\"\n" \ 86 + " .long "#label"-.\n" \ 87 + " lda "#res","#cont"-"#label"("#err")\n" \ 88 + ".previous\n" 77 89 78 90 extern void __get_user_unknown(void); 79 91 ··· 98 100 __gu_err; \ 99 101 }) 100 102 101 - #define __get_user_check(x, ptr, size, segment) \ 102 - ({ \ 103 - long __gu_err = -EFAULT; \ 104 - unsigned long __gu_val = 0; \ 105 - const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ 106 - if (__access_ok((unsigned long)__gu_addr, size, segment)) { \ 107 - __gu_err = 0; \ 108 - switch (size) { \ 109 - case 1: __get_user_8(__gu_addr); break; \ 110 - case 2: __get_user_16(__gu_addr); break; \ 111 - case 4: __get_user_32(__gu_addr); break; \ 112 - case 8: __get_user_64(__gu_addr); break; \ 113 - default: __get_user_unknown(); break; \ 114 - } \ 115 - } \ 116 - (x) = (__force __typeof__(*(ptr))) __gu_val; \ 117 - __gu_err; \ 103 + #define __get_user_check(x, ptr, size) \ 104 + ({ \ 105 + long __gu_err = -EFAULT; \ 106 + unsigned long __gu_val = 0; \ 107 + const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ 108 + if (__access_ok((unsigned long)__gu_addr, size)) { \ 109 + __gu_err = 0; \ 110 + switch (size) { \ 111 + case 1: __get_user_8(__gu_addr); break; \ 112 + case 2: __get_user_16(__gu_addr); break; \ 113 + case 4: __get_user_32(__gu_addr); break; \ 114 + case 8: __get_user_64(__gu_addr); break; \ 115 + default: __get_user_unknown(); break; \ 116 + } \ 117 + } \ 118 + (x) = (__force __typeof__(*(ptr))) __gu_val; \ 119 + __gu_err; \ 118 120 }) 119 121 120 122 struct __large_struct { unsigned long buf[100]; }; ··· 123 125 #define __get_user_64(addr) \ 124 126 __asm__("1: ldq %0,%2\n" \ 125 127 "2:\n" \ 126 - ".section __ex_table,\"a\"\n" \ 127 - " .long 1b - .\n" \ 128 - " lda %0, 2b-1b(%1)\n" \ 129 - ".previous" \ 128 + EXC(1b,2b,%0,%1) \ 130 129 : "=r"(__gu_val), "=r"(__gu_err) \ 131 130 : "m"(__m(addr)), "1"(__gu_err)) 132 131 133 132 #define __get_user_32(addr) \ 134 133 
__asm__("1: ldl %0,%2\n" \ 135 134 "2:\n" \ 136 - ".section __ex_table,\"a\"\n" \ 137 - " .long 1b - .\n" \ 138 - " lda %0, 2b-1b(%1)\n" \ 139 - ".previous" \ 135 + EXC(1b,2b,%0,%1) \ 140 136 : "=r"(__gu_val), "=r"(__gu_err) \ 141 137 : "m"(__m(addr)), "1"(__gu_err)) 142 138 ··· 140 148 #define __get_user_16(addr) \ 141 149 __asm__("1: ldwu %0,%2\n" \ 142 150 "2:\n" \ 143 - ".section __ex_table,\"a\"\n" \ 144 - " .long 1b - .\n" \ 145 - " lda %0, 2b-1b(%1)\n" \ 146 - ".previous" \ 151 + EXC(1b,2b,%0,%1) \ 147 152 : "=r"(__gu_val), "=r"(__gu_err) \ 148 153 : "m"(__m(addr)), "1"(__gu_err)) 149 154 150 155 #define __get_user_8(addr) \ 151 156 __asm__("1: ldbu %0,%2\n" \ 152 157 "2:\n" \ 153 - ".section __ex_table,\"a\"\n" \ 154 - " .long 1b - .\n" \ 155 - " lda %0, 2b-1b(%1)\n" \ 156 - ".previous" \ 158 + EXC(1b,2b,%0,%1) \ 157 159 : "=r"(__gu_val), "=r"(__gu_err) \ 158 160 : "m"(__m(addr)), "1"(__gu_err)) 159 161 #else ··· 163 177 " extwh %1,%3,%1\n" \ 164 178 " or %0,%1,%0\n" \ 165 179 "3:\n" \ 166 - ".section __ex_table,\"a\"\n" \ 167 - " .long 1b - .\n" \ 168 - " lda %0, 3b-1b(%2)\n" \ 169 - " .long 2b - .\n" \ 170 - " lda %0, 3b-2b(%2)\n" \ 171 - ".previous" \ 180 + EXC(1b,3b,%0,%2) \ 181 + EXC(2b,3b,%0,%2) \ 172 182 : "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err) \ 173 183 : "r"(addr), "2"(__gu_err)); \ 174 184 } ··· 173 191 __asm__("1: ldq_u %0,0(%2)\n" \ 174 192 " extbl %0,%2,%0\n" \ 175 193 "2:\n" \ 176 - ".section __ex_table,\"a\"\n" \ 177 - " .long 1b - .\n" \ 178 - " lda %0, 2b-1b(%1)\n" \ 179 - ".previous" \ 194 + EXC(1b,2b,%0,%1) \ 180 195 : "=&r"(__gu_val), "=r"(__gu_err) \ 181 196 : "r"(addr), "1"(__gu_err)) 182 197 #endif ··· 194 215 __pu_err; \ 195 216 }) 196 217 197 - #define __put_user_check(x, ptr, size, segment) \ 198 - ({ \ 199 - long __pu_err = -EFAULT; \ 200 - __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ 201 - if (__access_ok((unsigned long)__pu_addr, size, segment)) { \ 202 - __pu_err = 0; \ 203 - switch (size) { \ 204 - case 1: 
__put_user_8(x, __pu_addr); break; \ 205 - case 2: __put_user_16(x, __pu_addr); break; \ 206 - case 4: __put_user_32(x, __pu_addr); break; \ 207 - case 8: __put_user_64(x, __pu_addr); break; \ 208 - default: __put_user_unknown(); break; \ 209 - } \ 210 - } \ 211 - __pu_err; \ 218 + #define __put_user_check(x, ptr, size) \ 219 + ({ \ 220 + long __pu_err = -EFAULT; \ 221 + __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ 222 + if (__access_ok((unsigned long)__pu_addr, size)) { \ 223 + __pu_err = 0; \ 224 + switch (size) { \ 225 + case 1: __put_user_8(x, __pu_addr); break; \ 226 + case 2: __put_user_16(x, __pu_addr); break; \ 227 + case 4: __put_user_32(x, __pu_addr); break; \ 228 + case 8: __put_user_64(x, __pu_addr); break; \ 229 + default: __put_user_unknown(); break; \ 230 + } \ 231 + } \ 232 + __pu_err; \ 212 233 }) 213 234 214 235 /* ··· 219 240 #define __put_user_64(x, addr) \ 220 241 __asm__ __volatile__("1: stq %r2,%1\n" \ 221 242 "2:\n" \ 222 - ".section __ex_table,\"a\"\n" \ 223 - " .long 1b - .\n" \ 224 - " lda $31,2b-1b(%0)\n" \ 225 - ".previous" \ 243 + EXC(1b,2b,$31,%0) \ 226 244 : "=r"(__pu_err) \ 227 245 : "m" (__m(addr)), "rJ" (x), "0"(__pu_err)) 228 246 229 247 #define __put_user_32(x, addr) \ 230 248 __asm__ __volatile__("1: stl %r2,%1\n" \ 231 249 "2:\n" \ 232 - ".section __ex_table,\"a\"\n" \ 233 - " .long 1b - .\n" \ 234 - " lda $31,2b-1b(%0)\n" \ 235 - ".previous" \ 250 + EXC(1b,2b,$31,%0) \ 236 251 : "=r"(__pu_err) \ 237 252 : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) 238 253 ··· 236 263 #define __put_user_16(x, addr) \ 237 264 __asm__ __volatile__("1: stw %r2,%1\n" \ 238 265 "2:\n" \ 239 - ".section __ex_table,\"a\"\n" \ 240 - " .long 1b - .\n" \ 241 - " lda $31,2b-1b(%0)\n" \ 242 - ".previous" \ 266 + EXC(1b,2b,$31,%0) \ 243 267 : "=r"(__pu_err) \ 244 268 : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) 245 269 246 270 #define __put_user_8(x, addr) \ 247 271 __asm__ __volatile__("1: stb %r2,%1\n" \ 248 272 "2:\n" \ 249 - ".section __ex_table,\"a\"\n" 
\ 250 - " .long 1b - .\n" \ 251 - " lda $31,2b-1b(%0)\n" \ 252 - ".previous" \ 273 + EXC(1b,2b,$31,%0) \ 253 274 : "=r"(__pu_err) \ 254 275 : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) 255 276 #else ··· 265 298 "3: stq_u %2,1(%5)\n" \ 266 299 "4: stq_u %1,0(%5)\n" \ 267 300 "5:\n" \ 268 - ".section __ex_table,\"a\"\n" \ 269 - " .long 1b - .\n" \ 270 - " lda $31, 5b-1b(%0)\n" \ 271 - " .long 2b - .\n" \ 272 - " lda $31, 5b-2b(%0)\n" \ 273 - " .long 3b - .\n" \ 274 - " lda $31, 5b-3b(%0)\n" \ 275 - " .long 4b - .\n" \ 276 - " lda $31, 5b-4b(%0)\n" \ 277 - ".previous" \ 301 + EXC(1b,5b,$31,%0) \ 302 + EXC(2b,5b,$31,%0) \ 303 + EXC(3b,5b,$31,%0) \ 304 + EXC(4b,5b,$31,%0) \ 278 305 : "=r"(__pu_err), "=&r"(__pu_tmp1), \ 279 306 "=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \ 280 307 "=&r"(__pu_tmp4) \ ··· 285 324 " or %1,%2,%1\n" \ 286 325 "2: stq_u %1,0(%4)\n" \ 287 326 "3:\n" \ 288 - ".section __ex_table,\"a\"\n" \ 289 - " .long 1b - .\n" \ 290 - " lda $31, 3b-1b(%0)\n" \ 291 - " .long 2b - .\n" \ 292 - " lda $31, 3b-2b(%0)\n" \ 293 - ".previous" \ 327 + EXC(1b,3b,$31,%0) \ 328 + EXC(2b,3b,$31,%0) \ 294 329 : "=r"(__pu_err), \ 295 330 "=&r"(__pu_tmp1), "=&r"(__pu_tmp2) \ 296 331 : "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \ ··· 298 341 * Complex access routines 299 342 */ 300 343 301 - /* This little bit of silliness is to get the GP loaded for a function 302 - that ordinarily wouldn't. Otherwise we could have it done by the macro 303 - directly, which can be optimized the linker. 
*/ 304 - #ifdef MODULE 305 - #define __module_address(sym) "r"(sym), 306 - #define __module_call(ra, arg, sym) "jsr $" #ra ",(%" #arg ")," #sym 307 - #else 308 - #define __module_address(sym) 309 - #define __module_call(ra, arg, sym) "bsr $" #ra "," #sym " !samegp" 310 - #endif 344 + extern long __copy_user(void *to, const void *from, long len); 311 345 312 - extern void __copy_user(void); 313 - 314 - extern inline long 315 - __copy_tofrom_user_nocheck(void *to, const void *from, long len) 346 + static inline unsigned long 347 + raw_copy_from_user(void *to, const void __user *from, unsigned long len) 316 348 { 317 - register void * __cu_to __asm__("$6") = to; 318 - register const void * __cu_from __asm__("$7") = from; 319 - register long __cu_len __asm__("$0") = len; 320 - 321 - __asm__ __volatile__( 322 - __module_call(28, 3, __copy_user) 323 - : "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to) 324 - : __module_address(__copy_user) 325 - "0" (__cu_len), "1" (__cu_from), "2" (__cu_to) 326 - : "$1", "$2", "$3", "$4", "$5", "$28", "memory"); 327 - 328 - return __cu_len; 349 + return __copy_user(to, (__force const void *)from, len); 329 350 } 330 351 331 - #define __copy_to_user(to, from, n) \ 332 - ({ \ 333 - __chk_user_ptr(to); \ 334 - __copy_tofrom_user_nocheck((__force void *)(to), (from), (n)); \ 335 - }) 336 - #define __copy_from_user(to, from, n) \ 337 - ({ \ 338 - __chk_user_ptr(from); \ 339 - __copy_tofrom_user_nocheck((to), (__force void *)(from), (n)); \ 340 - }) 341 - 342 - #define __copy_to_user_inatomic __copy_to_user 343 - #define __copy_from_user_inatomic __copy_from_user 344 - 345 - extern inline long 346 - copy_to_user(void __user *to, const void *from, long n) 352 + static inline unsigned long 353 + raw_copy_to_user(void __user *to, const void *from, unsigned long len) 347 354 { 348 - if (likely(__access_ok((unsigned long)to, n, get_fs()))) 349 - n = __copy_tofrom_user_nocheck((__force void *)to, from, n); 350 - return n; 355 + return 
__copy_user((__force void *)to, from, len); 351 356 } 352 357 353 - extern inline long 354 - copy_from_user(void *to, const void __user *from, long n) 355 - { 356 - long res = n; 357 - if (likely(__access_ok((unsigned long)from, n, get_fs()))) 358 - res = __copy_from_user_inatomic(to, from, n); 359 - if (unlikely(res)) 360 - memset(to + (n - res), 0, res); 361 - return res; 362 - } 363 - 364 - extern void __do_clear_user(void); 365 - 366 - extern inline long 367 - __clear_user(void __user *to, long len) 368 - { 369 - register void __user * __cl_to __asm__("$6") = to; 370 - register long __cl_len __asm__("$0") = len; 371 - __asm__ __volatile__( 372 - __module_call(28, 2, __do_clear_user) 373 - : "=r"(__cl_len), "=r"(__cl_to) 374 - : __module_address(__do_clear_user) 375 - "0"(__cl_len), "1"(__cl_to) 376 - : "$1", "$2", "$3", "$4", "$5", "$28", "memory"); 377 - return __cl_len; 378 - } 358 + extern long __clear_user(void __user *to, long len); 379 359 380 360 extern inline long 381 361 clear_user(void __user *to, long len) 382 362 { 383 - if (__access_ok((unsigned long)to, len, get_fs())) 363 + if (__access_ok((unsigned long)to, len)) 384 364 len = __clear_user(to, len); 385 365 return len; 386 366 } 387 367 388 - #undef __module_address 389 - #undef __module_call 390 - 391 368 #define user_addr_max() \ 392 - (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) 369 + (uaccess_kernel() ? ~0UL : TASK_SIZE) 393 370 394 371 extern long strncpy_from_user(char *dest, const char __user *src, long count); 395 372 extern __must_check long strlen_user(const char __user *str); 396 373 extern __must_check long strnlen_user(const char __user *str, long n); 397 374 398 - /* 399 - * About the exception table: 400 - * 401 - * - insn is a 32-bit pc-relative offset from the faulting insn. 402 - * - nextinsn is a 16-bit offset off of the faulting instruction 403 - * (not off of the *next* instruction as branches are). 404 - * - errreg is the register in which to place -EFAULT. 
405 - * - valreg is the final target register for the load sequence 406 - * and will be zeroed. 407 - * 408 - * Either errreg or valreg may be $31, in which case nothing happens. 409 - * 410 - * The exception fixup information "just so happens" to be arranged 411 - * as in a MEM format instruction. This lets us emit our three 412 - * values like so: 413 - * 414 - * lda valreg, nextinsn(errreg) 415 - * 416 - */ 417 - 418 - struct exception_table_entry 419 - { 420 - signed int insn; 421 - union exception_fixup { 422 - unsigned unit; 423 - struct { 424 - signed int nextinsn : 16; 425 - unsigned int errreg : 5; 426 - unsigned int valreg : 5; 427 - } bits; 428 - } fixup; 429 - }; 430 - 431 - /* Returns the new pc */ 432 - #define fixup_exception(map_reg, _fixup, pc) \ 433 - ({ \ 434 - if ((_fixup)->fixup.bits.valreg != 31) \ 435 - map_reg((_fixup)->fixup.bits.valreg) = 0; \ 436 - if ((_fixup)->fixup.bits.errreg != 31) \ 437 - map_reg((_fixup)->fixup.bits.errreg) = -EFAULT; \ 438 - (pc) + (_fixup)->fixup.bits.nextinsn; \ 439 - }) 440 - 441 - #define ARCH_HAS_RELATIVE_EXTABLE 442 - 443 - #define swap_ex_entry_fixup(a, b, tmp, delta) \ 444 - do { \ 445 - (a)->fixup.unit = (b)->fixup.unit; \ 446 - (b)->fixup.unit = (tmp).fixup.unit; \ 447 - } while (0) 448 - 375 + #include <asm/extable.h> 449 376 450 377 #endif /* __ALPHA_UACCESS_H */
+42 -110
arch/alpha/kernel/traps.c
··· 482 482 " extwl %1,%3,%1\n" 483 483 " extwh %2,%3,%2\n" 484 484 "3:\n" 485 - ".section __ex_table,\"a\"\n" 486 - " .long 1b - .\n" 487 - " lda %1,3b-1b(%0)\n" 488 - " .long 2b - .\n" 489 - " lda %2,3b-2b(%0)\n" 490 - ".previous" 485 + EXC(1b,3b,%1,%0) 486 + EXC(2b,3b,%2,%0) 491 487 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) 492 488 : "r"(va), "0"(0)); 493 489 if (error) ··· 498 502 " extll %1,%3,%1\n" 499 503 " extlh %2,%3,%2\n" 500 504 "3:\n" 501 - ".section __ex_table,\"a\"\n" 502 - " .long 1b - .\n" 503 - " lda %1,3b-1b(%0)\n" 504 - " .long 2b - .\n" 505 - " lda %2,3b-2b(%0)\n" 506 - ".previous" 505 + EXC(1b,3b,%1,%0) 506 + EXC(2b,3b,%2,%0) 507 507 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) 508 508 : "r"(va), "0"(0)); 509 509 if (error) ··· 514 522 " extql %1,%3,%1\n" 515 523 " extqh %2,%3,%2\n" 516 524 "3:\n" 517 - ".section __ex_table,\"a\"\n" 518 - " .long 1b - .\n" 519 - " lda %1,3b-1b(%0)\n" 520 - " .long 2b - .\n" 521 - " lda %2,3b-2b(%0)\n" 522 - ".previous" 525 + EXC(1b,3b,%1,%0) 526 + EXC(2b,3b,%2,%0) 523 527 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) 524 528 : "r"(va), "0"(0)); 525 529 if (error) ··· 539 551 "3: stq_u %2,1(%5)\n" 540 552 "4: stq_u %1,0(%5)\n" 541 553 "5:\n" 542 - ".section __ex_table,\"a\"\n" 543 - " .long 1b - .\n" 544 - " lda %2,5b-1b(%0)\n" 545 - " .long 2b - .\n" 546 - " lda %1,5b-2b(%0)\n" 547 - " .long 3b - .\n" 548 - " lda $31,5b-3b(%0)\n" 549 - " .long 4b - .\n" 550 - " lda $31,5b-4b(%0)\n" 551 - ".previous" 554 + EXC(1b,5b,%2,%0) 555 + EXC(2b,5b,%1,%0) 556 + EXC(3b,5b,$31,%0) 557 + EXC(4b,5b,$31,%0) 552 558 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), 553 559 "=&r"(tmp3), "=&r"(tmp4) 554 560 : "r"(va), "r"(una_reg(reg)), "0"(0)); ··· 563 581 "3: stq_u %2,3(%5)\n" 564 582 "4: stq_u %1,0(%5)\n" 565 583 "5:\n" 566 - ".section __ex_table,\"a\"\n" 567 - " .long 1b - .\n" 568 - " lda %2,5b-1b(%0)\n" 569 - " .long 2b - .\n" 570 - " lda %1,5b-2b(%0)\n" 571 - " .long 3b - .\n" 572 - " lda $31,5b-3b(%0)\n" 573 - " .long 4b - .\n" 574 - " lda 
$31,5b-4b(%0)\n" 575 - ".previous" 584 + EXC(1b,5b,%2,%0) 585 + EXC(2b,5b,%1,%0) 586 + EXC(3b,5b,$31,%0) 587 + EXC(4b,5b,$31,%0) 576 588 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), 577 589 "=&r"(tmp3), "=&r"(tmp4) 578 590 : "r"(va), "r"(una_reg(reg)), "0"(0)); ··· 587 611 "3: stq_u %2,7(%5)\n" 588 612 "4: stq_u %1,0(%5)\n" 589 613 "5:\n" 590 - ".section __ex_table,\"a\"\n\t" 591 - " .long 1b - .\n" 592 - " lda %2,5b-1b(%0)\n" 593 - " .long 2b - .\n" 594 - " lda %1,5b-2b(%0)\n" 595 - " .long 3b - .\n" 596 - " lda $31,5b-3b(%0)\n" 597 - " .long 4b - .\n" 598 - " lda $31,5b-4b(%0)\n" 599 - ".previous" 614 + EXC(1b,5b,%2,%0) 615 + EXC(2b,5b,%1,%0) 616 + EXC(3b,5b,$31,%0) 617 + EXC(4b,5b,$31,%0) 600 618 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), 601 619 "=&r"(tmp3), "=&r"(tmp4) 602 620 : "r"(va), "r"(una_reg(reg)), "0"(0)); ··· 772 802 /* Don't bother reading ds in the access check since we already 773 803 know that this came from the user. Also rely on the fact that 774 804 the page at TASK_SIZE is unmapped and so can't be touched anyway. 
*/ 775 - if (!__access_ok((unsigned long)va, 0, USER_DS)) 805 + if ((unsigned long)va >= TASK_SIZE) 776 806 goto give_sigsegv; 777 807 778 808 ++unaligned[1].count; ··· 805 835 " extwl %1,%3,%1\n" 806 836 " extwh %2,%3,%2\n" 807 837 "3:\n" 808 - ".section __ex_table,\"a\"\n" 809 - " .long 1b - .\n" 810 - " lda %1,3b-1b(%0)\n" 811 - " .long 2b - .\n" 812 - " lda %2,3b-2b(%0)\n" 813 - ".previous" 838 + EXC(1b,3b,%1,%0) 839 + EXC(2b,3b,%2,%0) 814 840 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) 815 841 : "r"(va), "0"(0)); 816 842 if (error) ··· 821 855 " extll %1,%3,%1\n" 822 856 " extlh %2,%3,%2\n" 823 857 "3:\n" 824 - ".section __ex_table,\"a\"\n" 825 - " .long 1b - .\n" 826 - " lda %1,3b-1b(%0)\n" 827 - " .long 2b - .\n" 828 - " lda %2,3b-2b(%0)\n" 829 - ".previous" 858 + EXC(1b,3b,%1,%0) 859 + EXC(2b,3b,%2,%0) 830 860 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) 831 861 : "r"(va), "0"(0)); 832 862 if (error) ··· 837 875 " extql %1,%3,%1\n" 838 876 " extqh %2,%3,%2\n" 839 877 "3:\n" 840 - ".section __ex_table,\"a\"\n" 841 - " .long 1b - .\n" 842 - " lda %1,3b-1b(%0)\n" 843 - " .long 2b - .\n" 844 - " lda %2,3b-2b(%0)\n" 845 - ".previous" 878 + EXC(1b,3b,%1,%0) 879 + EXC(2b,3b,%2,%0) 846 880 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) 847 881 : "r"(va), "0"(0)); 848 882 if (error) ··· 853 895 " extll %1,%3,%1\n" 854 896 " extlh %2,%3,%2\n" 855 897 "3:\n" 856 - ".section __ex_table,\"a\"\n" 857 - " .long 1b - .\n" 858 - " lda %1,3b-1b(%0)\n" 859 - " .long 2b - .\n" 860 - " lda %2,3b-2b(%0)\n" 861 - ".previous" 898 + EXC(1b,3b,%1,%0) 899 + EXC(2b,3b,%2,%0) 862 900 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) 863 901 : "r"(va), "0"(0)); 864 902 if (error) ··· 869 915 " extql %1,%3,%1\n" 870 916 " extqh %2,%3,%2\n" 871 917 "3:\n" 872 - ".section __ex_table,\"a\"\n" 873 - " .long 1b - .\n" 874 - " lda %1,3b-1b(%0)\n" 875 - " .long 2b - .\n" 876 - " lda %2,3b-2b(%0)\n" 877 - ".previous" 918 + EXC(1b,3b,%1,%0) 919 + EXC(2b,3b,%2,%0) 878 920 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) 879 921 : 
"r"(va), "0"(0)); 880 922 if (error) ··· 894 944 "3: stq_u %2,1(%5)\n" 895 945 "4: stq_u %1,0(%5)\n" 896 946 "5:\n" 897 - ".section __ex_table,\"a\"\n" 898 - " .long 1b - .\n" 899 - " lda %2,5b-1b(%0)\n" 900 - " .long 2b - .\n" 901 - " lda %1,5b-2b(%0)\n" 902 - " .long 3b - .\n" 903 - " lda $31,5b-3b(%0)\n" 904 - " .long 4b - .\n" 905 - " lda $31,5b-4b(%0)\n" 906 - ".previous" 947 + EXC(1b,5b,%2,%0) 948 + EXC(2b,5b,%1,%0) 949 + EXC(3b,5b,$31,%0) 950 + EXC(4b,5b,$31,%0) 907 951 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), 908 952 "=&r"(tmp3), "=&r"(tmp4) 909 953 : "r"(va), "r"(*reg_addr), "0"(0)); ··· 922 978 "3: stq_u %2,3(%5)\n" 923 979 "4: stq_u %1,0(%5)\n" 924 980 "5:\n" 925 - ".section __ex_table,\"a\"\n" 926 - " .long 1b - .\n" 927 - " lda %2,5b-1b(%0)\n" 928 - " .long 2b - .\n" 929 - " lda %1,5b-2b(%0)\n" 930 - " .long 3b - .\n" 931 - " lda $31,5b-3b(%0)\n" 932 - " .long 4b - .\n" 933 - " lda $31,5b-4b(%0)\n" 934 - ".previous" 981 + EXC(1b,5b,%2,%0) 982 + EXC(2b,5b,%1,%0) 983 + EXC(3b,5b,$31,%0) 984 + EXC(4b,5b,$31,%0) 935 985 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), 936 986 "=&r"(tmp3), "=&r"(tmp4) 937 987 : "r"(va), "r"(*reg_addr), "0"(0)); ··· 950 1012 "3: stq_u %2,7(%5)\n" 951 1013 "4: stq_u %1,0(%5)\n" 952 1014 "5:\n" 953 - ".section __ex_table,\"a\"\n\t" 954 - " .long 1b - .\n" 955 - " lda %2,5b-1b(%0)\n" 956 - " .long 2b - .\n" 957 - " lda %1,5b-2b(%0)\n" 958 - " .long 3b - .\n" 959 - " lda $31,5b-3b(%0)\n" 960 - " .long 4b - .\n" 961 - " lda $31,5b-4b(%0)\n" 962 - ".previous" 1015 + EXC(1b,5b,%2,%0) 1016 + EXC(2b,5b,%1,%0) 1017 + EXC(3b,5b,$31,%0) 1018 + EXC(4b,5b,$31,%0) 963 1019 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), 964 1020 "=&r"(tmp3), "=&r"(tmp4) 965 1021 : "r"(va), "r"(*reg_addr), "0"(0)); ··· 979 1047 /* We need to replicate some of the logic in mm/fault.c, 980 1048 since we don't have access to the fault code in the 981 1049 exception handling return path. 
*/ 982 - if (!__access_ok((unsigned long)va, 0, USER_DS)) 1050 + if ((unsigned long)va >= TASK_SIZE) 983 1051 info.si_code = SEGV_ACCERR; 984 1052 else { 985 1053 struct mm_struct *mm = current->mm;
+26 -40
arch/alpha/lib/clear_user.S
··· 8 8 * right "bytes left to zero" value (and that it is updated only _after_ 9 9 * a successful copy). There is also some rather minor exception setup 10 10 * stuff. 11 - * 12 - * NOTE! This is not directly C-callable, because the calling semantics 13 - * are different: 14 - * 15 - * Inputs: 16 - * length in $0 17 - * destination address in $6 18 - * exception pointer in $7 19 - * return address in $28 (exceptions expect it there) 20 - * 21 - * Outputs: 22 - * bytes left to copy in $0 23 - * 24 - * Clobbers: 25 - * $1,$2,$3,$4,$5,$6 26 11 */ 27 12 #include <asm/export.h> 28 13 ··· 23 38 .set noreorder 24 39 .align 4 25 40 26 - .globl __do_clear_user 27 - .ent __do_clear_user 28 - .frame $30, 0, $28 41 + .globl __clear_user 42 + .ent __clear_user 43 + .frame $30, 0, $26 29 44 .prologue 0 30 45 31 46 $loop: 32 47 and $1, 3, $4 # e0 : 33 48 beq $4, 1f # .. e1 : 34 49 35 - 0: EX( stq_u $31, 0($6) ) # e0 : zero one word 50 + 0: EX( stq_u $31, 0($16) ) # e0 : zero one word 36 51 subq $0, 8, $0 # .. e1 : 37 52 subq $4, 1, $4 # e0 : 38 - addq $6, 8, $6 # .. e1 : 53 + addq $16, 8, $16 # .. e1 : 39 54 bne $4, 0b # e1 : 40 55 unop # : 41 56 42 57 1: bic $1, 3, $1 # e0 : 43 58 beq $1, $tail # .. e1 : 44 59 45 - 2: EX( stq_u $31, 0($6) ) # e0 : zero four words 60 + 2: EX( stq_u $31, 0($16) ) # e0 : zero four words 46 61 subq $0, 8, $0 # .. e1 : 47 - EX( stq_u $31, 8($6) ) # e0 : 62 + EX( stq_u $31, 8($16) ) # e0 : 48 63 subq $0, 8, $0 # .. e1 : 49 - EX( stq_u $31, 16($6) ) # e0 : 64 + EX( stq_u $31, 16($16) ) # e0 : 50 65 subq $0, 8, $0 # .. e1 : 51 - EX( stq_u $31, 24($6) ) # e0 : 66 + EX( stq_u $31, 24($16) ) # e0 : 52 67 subq $0, 8, $0 # .. e1 : 53 68 subq $1, 4, $1 # e0 : 54 - addq $6, 32, $6 # .. e1 : 69 + addq $16, 32, $16 # .. e1 : 55 70 bne $1, 2b # e1 : 56 71 57 72 $tail: 58 73 bne $2, 1f # e1 : is there a tail to do? 59 - ret $31, ($28), 1 # .. e1 : 74 + ret $31, ($26), 1 # .. 
e1 : 60 75 61 - 1: EX( ldq_u $5, 0($6) ) # e0 : 76 + 1: EX( ldq_u $5, 0($16) ) # e0 : 62 77 clr $0 # .. e1 : 63 78 nop # e1 : 64 79 mskqh $5, $0, $5 # e0 : 65 - EX( stq_u $5, 0($6) ) # e0 : 66 - ret $31, ($28), 1 # .. e1 : 80 + EX( stq_u $5, 0($16) ) # e0 : 81 + ret $31, ($26), 1 # .. e1 : 67 82 68 - __do_clear_user: 69 - and $6, 7, $4 # e0 : find dest misalignment 83 + __clear_user: 84 + and $17, $17, $0 85 + and $16, 7, $4 # e0 : find dest misalignment 70 86 beq $0, $zerolength # .. e1 : 71 87 addq $0, $4, $1 # e0 : bias counter 72 88 and $1, 7, $2 # e1 : number of bytes in tail 73 89 srl $1, 3, $1 # e0 : 74 90 beq $4, $loop # .. e1 : 75 91 76 - EX( ldq_u $5, 0($6) ) # e0 : load dst word to mask back in 92 + EX( ldq_u $5, 0($16) ) # e0 : load dst word to mask back in 77 93 beq $1, $oneword # .. e1 : sub-word store? 78 94 79 - mskql $5, $6, $5 # e0 : take care of misaligned head 80 - addq $6, 8, $6 # .. e1 : 81 - EX( stq_u $5, -8($6) ) # e0 : 95 + mskql $5, $16, $5 # e0 : take care of misaligned head 96 + addq $16, 8, $16 # .. e1 : 97 + EX( stq_u $5, -8($16) ) # e0 : 82 98 addq $0, $4, $0 # .. e1 : bytes left -= 8 - misalignment 83 99 subq $1, 1, $1 # e0 : 84 100 subq $0, 8, $0 # .. e1 : ··· 87 101 unop # : 88 102 89 103 $oneword: 90 - mskql $5, $6, $4 # e0 : 104 + mskql $5, $16, $4 # e0 : 91 105 mskqh $5, $2, $5 # e0 : 92 106 or $5, $4, $5 # e1 : 93 - EX( stq_u $5, 0($6) ) # e0 : 107 + EX( stq_u $5, 0($16) ) # e0 : 94 108 clr $0 # .. e1 : 95 109 96 110 $zerolength: 97 111 $exception: 98 - ret $31, ($28), 1 # .. e1 : 112 + ret $31, ($26), 1 # .. e1 : 99 113 100 - .end __do_clear_user 101 - EXPORT_SYMBOL(__do_clear_user) 114 + .end __clear_user 115 + EXPORT_SYMBOL(__clear_user)
+34 -48
arch/alpha/lib/copy_user.S
··· 9 9 * contains the right "bytes left to copy" value (and that it is updated 10 10 * only _after_ a successful copy). There is also some rather minor 11 11 * exception setup stuff.. 12 - * 13 - * NOTE! This is not directly C-callable, because the calling semantics are 14 - * different: 15 - * 16 - * Inputs: 17 - * length in $0 18 - * destination address in $6 19 - * source address in $7 20 - * return address in $28 21 - * 22 - * Outputs: 23 - * bytes left to copy in $0 24 - * 25 - * Clobbers: 26 - * $1,$2,$3,$4,$5,$6,$7 27 12 */ 28 13 29 14 #include <asm/export.h> ··· 34 49 .ent __copy_user 35 50 __copy_user: 36 51 .prologue 0 37 - and $6,7,$3 52 + and $18,$18,$0 53 + and $16,7,$3 38 54 beq $0,$35 39 55 beq $3,$36 40 56 subq $3,8,$3 41 57 .align 4 42 58 $37: 43 - EXI( ldq_u $1,0($7) ) 44 - EXO( ldq_u $2,0($6) ) 45 - extbl $1,$7,$1 46 - mskbl $2,$6,$2 47 - insbl $1,$6,$1 59 + EXI( ldq_u $1,0($17) ) 60 + EXO( ldq_u $2,0($16) ) 61 + extbl $1,$17,$1 62 + mskbl $2,$16,$2 63 + insbl $1,$16,$1 48 64 addq $3,1,$3 49 65 bis $1,$2,$1 50 - EXO( stq_u $1,0($6) ) 66 + EXO( stq_u $1,0($16) ) 51 67 subq $0,1,$0 52 - addq $6,1,$6 53 - addq $7,1,$7 68 + addq $16,1,$16 69 + addq $17,1,$17 54 70 beq $0,$41 55 71 bne $3,$37 56 72 $36: 57 - and $7,7,$1 73 + and $17,7,$1 58 74 bic $0,7,$4 59 75 beq $1,$43 60 76 beq $4,$48 61 - EXI( ldq_u $3,0($7) ) 77 + EXI( ldq_u $3,0($17) ) 62 78 .align 4 63 79 $50: 64 - EXI( ldq_u $2,8($7) ) 80 + EXI( ldq_u $2,8($17) ) 65 81 subq $4,8,$4 66 - extql $3,$7,$3 67 - extqh $2,$7,$1 82 + extql $3,$17,$3 83 + extqh $2,$17,$1 68 84 bis $3,$1,$1 69 - EXO( stq $1,0($6) ) 70 - addq $7,8,$7 85 + EXO( stq $1,0($16) ) 86 + addq $17,8,$17 71 87 subq $0,8,$0 72 - addq $6,8,$6 88 + addq $16,8,$16 73 89 bis $2,$2,$3 74 90 bne $4,$50 75 91 $48: 76 92 beq $0,$41 77 93 .align 4 78 94 $57: 79 - EXI( ldq_u $1,0($7) ) 80 - EXO( ldq_u $2,0($6) ) 81 - extbl $1,$7,$1 82 - mskbl $2,$6,$2 83 - insbl $1,$6,$1 95 + EXI( ldq_u $1,0($17) ) 96 + EXO( ldq_u $2,0($16) ) 97 + extbl 
$1,$17,$1 98 + mskbl $2,$16,$2 99 + insbl $1,$16,$1 84 100 bis $1,$2,$1 85 - EXO( stq_u $1,0($6) ) 101 + EXO( stq_u $1,0($16) ) 86 102 subq $0,1,$0 87 - addq $6,1,$6 88 - addq $7,1,$7 103 + addq $16,1,$16 104 + addq $17,1,$17 89 105 bne $0,$57 90 106 br $31,$41 91 107 .align 4 ··· 94 108 beq $4,$65 95 109 .align 4 96 110 $66: 97 - EXI( ldq $1,0($7) ) 111 + EXI( ldq $1,0($17) ) 98 112 subq $4,8,$4 99 - EXO( stq $1,0($6) ) 100 - addq $7,8,$7 113 + EXO( stq $1,0($16) ) 114 + addq $17,8,$17 101 115 subq $0,8,$0 102 - addq $6,8,$6 116 + addq $16,8,$16 103 117 bne $4,$66 104 118 $65: 105 119 beq $0,$41 106 - EXI( ldq $2,0($7) ) 107 - EXO( ldq $1,0($6) ) 120 + EXI( ldq $2,0($17) ) 121 + EXO( ldq $1,0($16) ) 108 122 mskql $2,$0,$2 109 123 mskqh $1,$0,$1 110 124 bis $2,$1,$2 111 - EXO( stq $2,0($6) ) 125 + EXO( stq $2,0($16) ) 112 126 bis $31,$31,$0 113 127 $41: 114 128 $35: 115 129 $exitin: 116 130 $exitout: 117 - ret $31,($28),1 131 + ret $31,($26),1 118 132 119 133 .end __copy_user 120 134 EXPORT_SYMBOL(__copy_user)
+2 -8
arch/alpha/lib/csum_partial_copy.c
··· 45 45 __asm__ __volatile__( \ 46 46 "1: ldq_u %0,%2\n" \ 47 47 "2:\n" \ 48 - ".section __ex_table,\"a\"\n" \ 49 - " .long 1b - .\n" \ 50 - " lda %0,2b-1b(%1)\n" \ 51 - ".previous" \ 48 + EXC(1b,2b,%0,%1) \ 52 49 : "=r"(x), "=r"(__guu_err) \ 53 50 : "m"(__m(ptr)), "1"(0)); \ 54 51 __guu_err; \ ··· 57 60 __asm__ __volatile__( \ 58 61 "1: stq_u %2,%1\n" \ 59 62 "2:\n" \ 60 - ".section __ex_table,\"a\"\n" \ 61 - " .long 1b - ." \ 62 - " lda $31,2b-1b(%0)\n" \ 63 - ".previous" \ 63 + EXC(1b,2b,$31,%0) \ 64 64 : "=r"(__puu_err) \ 65 65 : "m"(__m(addr)), "rJ"(x), "0"(0)); \ 66 66 __puu_err; \
+35 -49
arch/alpha/lib/ev6-clear_user.S
··· 9 9 * a successful copy). There is also some rather minor exception setup 10 10 * stuff. 11 11 * 12 - * NOTE! This is not directly C-callable, because the calling semantics 13 - * are different: 14 - * 15 - * Inputs: 16 - * length in $0 17 - * destination address in $6 18 - * exception pointer in $7 19 - * return address in $28 (exceptions expect it there) 20 - * 21 - * Outputs: 22 - * bytes left to copy in $0 23 - * 24 - * Clobbers: 25 - * $1,$2,$3,$4,$5,$6 26 - * 27 12 * Much of the information about 21264 scheduling/coding comes from: 28 13 * Compiler Writer's Guide for the Alpha 21264 29 14 * abbreviated as 'CWG' in other comments here ··· 41 56 .set noreorder 42 57 .align 4 43 58 44 - .globl __do_clear_user 45 - .ent __do_clear_user 46 - .frame $30, 0, $28 59 + .globl __clear_user 60 + .ent __clear_user 61 + .frame $30, 0, $26 47 62 .prologue 0 48 63 49 64 # Pipeline info : Slotting & Comments 50 - __do_clear_user: 51 - and $6, 7, $4 # .. E .. .. : find dest head misalignment 65 + __clear_user: 66 + and $17, $17, $0 67 + and $16, 7, $4 # .. E .. .. : find dest head misalignment 52 68 beq $0, $zerolength # U .. .. .. : U L U L 53 69 54 70 addq $0, $4, $1 # .. .. .. E : bias counter ··· 61 75 62 76 /* 63 77 * Head is not aligned. Write (8 - $4) bytes to head of destination 64 - * This means $6 is known to be misaligned 78 + * This means $16 is known to be misaligned 65 79 */ 66 - EX( ldq_u $5, 0($6) ) # .. .. .. L : load dst word to mask back in 80 + EX( ldq_u $5, 0($16) ) # .. .. .. L : load dst word to mask back in 67 81 beq $1, $onebyte # .. .. U .. : sub-word store? 68 - mskql $5, $6, $5 # .. U .. .. : take care of misaligned head 69 - addq $6, 8, $6 # E .. .. .. : L U U L 82 + mskql $5, $16, $5 # .. U .. .. : take care of misaligned head 83 + addq $16, 8, $16 # E .. .. .. : L U U L 70 84 71 - EX( stq_u $5, -8($6) ) # .. .. .. L : 85 + EX( stq_u $5, -8($16) ) # .. .. .. L : 72 86 subq $1, 1, $1 # .. .. E .. : 73 87 addq $0, $4, $0 # .. E .. .. 
: bytes left -= 8 - misalignment 74 88 subq $0, 8, $0 # E .. .. .. : U L U L ··· 79 93 * values upon initial entry to the loop 80 94 * $1 is number of quadwords to clear (zero is a valid value) 81 95 * $2 is number of trailing bytes (0..7) ($2 never used...) 82 - * $6 is known to be aligned 0mod8 96 + * $16 is known to be aligned 0mod8 83 97 */ 84 98 $headalign: 85 99 subq $1, 16, $4 # .. .. .. E : If < 16, we can not use the huge loop 86 - and $6, 0x3f, $2 # .. .. E .. : Forward work for huge loop 100 + and $16, 0x3f, $2 # .. .. E .. : Forward work for huge loop 87 101 subq $2, 0x40, $3 # .. E .. .. : bias counter (huge loop) 88 102 blt $4, $trailquad # U .. .. .. : U L U L 89 103 ··· 100 114 beq $3, $bigalign # U .. .. .. : U L U L : Aligned 0mod64 101 115 102 116 $alignmod64: 103 - EX( stq_u $31, 0($6) ) # .. .. .. L 117 + EX( stq_u $31, 0($16) ) # .. .. .. L 104 118 addq $3, 8, $3 # .. .. E .. 105 119 subq $0, 8, $0 # .. E .. .. 106 120 nop # E .. .. .. : U L U L 107 121 108 122 nop # .. .. .. E 109 123 subq $1, 1, $1 # .. .. E .. 110 - addq $6, 8, $6 # .. E .. .. 124 + addq $16, 8, $16 # .. E .. .. 111 125 blt $3, $alignmod64 # U .. .. .. : U L U L 112 126 113 127 $bigalign: 114 128 /* 115 129 * $0 is the number of bytes left 116 130 * $1 is the number of quads left 117 - * $6 is aligned 0mod64 131 + * $16 is aligned 0mod64 118 132 * we know that we'll be taking a minimum of one trip through 119 133 * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle 120 134 * We are _not_ going to update $0 after every single store. That ··· 131 145 nop # E : 132 146 nop # E : 133 147 nop # E : 134 - bis $6,$6,$3 # E : U L U L : Initial wh64 address is dest 148 + bis $16,$16,$3 # E : U L U L : Initial wh64 address is dest 135 149 /* This might actually help for the current trip... */ 136 150 137 151 $do_wh64: 138 152 wh64 ($3) # .. .. .. L1 : memory subsystem hint 139 153 subq $1, 16, $4 # .. .. E .. : Forward calculation - repeat the loop? 
140 - EX( stq_u $31, 0($6) ) # .. L .. .. 154 + EX( stq_u $31, 0($16) ) # .. L .. .. 141 155 subq $0, 8, $0 # E .. .. .. : U L U L 142 156 143 - addq $6, 128, $3 # E : Target address of wh64 144 - EX( stq_u $31, 8($6) ) # L : 145 - EX( stq_u $31, 16($6) ) # L : 157 + addq $16, 128, $3 # E : Target address of wh64 158 + EX( stq_u $31, 8($16) ) # L : 159 + EX( stq_u $31, 16($16) ) # L : 146 160 subq $0, 16, $0 # E : U L L U 147 161 148 162 nop # E : 149 - EX( stq_u $31, 24($6) ) # L : 150 - EX( stq_u $31, 32($6) ) # L : 163 + EX( stq_u $31, 24($16) ) # L : 164 + EX( stq_u $31, 32($16) ) # L : 151 165 subq $0, 168, $5 # E : U L L U : two trips through the loop left? 152 166 /* 168 = 192 - 24, since we've already completed some stores */ 153 167 154 168 subq $0, 16, $0 # E : 155 - EX( stq_u $31, 40($6) ) # L : 156 - EX( stq_u $31, 48($6) ) # L : 157 - cmovlt $5, $6, $3 # E : U L L U : Latency 2, extra mapping cycle 169 + EX( stq_u $31, 40($16) ) # L : 170 + EX( stq_u $31, 48($16) ) # L : 171 + cmovlt $5, $16, $3 # E : U L L U : Latency 2, extra mapping cycle 158 172 159 173 subq $1, 8, $1 # E : 160 174 subq $0, 16, $0 # E : 161 - EX( stq_u $31, 56($6) ) # L : 175 + EX( stq_u $31, 56($16) ) # L : 162 176 nop # E : U L U L 163 177 164 178 nop # E : 165 179 subq $0, 8, $0 # E : 166 - addq $6, 64, $6 # E : 180 + addq $16, 64, $16 # E : 167 181 bge $4, $do_wh64 # U : U L U L 168 182 169 183 $trailquad: ··· 176 190 beq $1, $trailbytes # U .. .. .. : U L U L : Only 0..7 bytes to go 177 191 178 192 $onequad: 179 - EX( stq_u $31, 0($6) ) # .. .. .. L 193 + EX( stq_u $31, 0($16) ) # .. .. .. L 180 194 subq $1, 1, $1 # .. .. E .. 181 195 subq $0, 8, $0 # .. E .. .. 182 196 nop # E .. .. .. : U L U L 183 197 184 198 nop # .. .. .. E 185 199 nop # .. .. E .. 186 - addq $6, 8, $6 # .. E .. .. 200 + addq $16, 8, $16 # .. E .. .. 187 201 bgt $1, $onequad # U .. .. .. : U L U L 188 202 189 203 # We have an unknown number of bytes left to go. 
··· 197 211 # so we will use $0 as the loop counter 198 212 # We know for a fact that $0 > 0 zero due to previous context 199 213 $onebyte: 200 - EX( stb $31, 0($6) ) # .. .. .. L 214 + EX( stb $31, 0($16) ) # .. .. .. L 201 215 subq $0, 1, $0 # .. .. E .. : 202 - addq $6, 1, $6 # .. E .. .. : 216 + addq $16, 1, $16 # .. E .. .. : 203 217 bgt $0, $onebyte # U .. .. .. : U L U L 204 218 205 219 $zerolength: ··· 207 221 nop # .. .. .. E : 208 222 nop # .. .. E .. : 209 223 nop # .. E .. .. : 210 - ret $31, ($28), 1 # L0 .. .. .. : L U L U 211 - .end __do_clear_user 212 - EXPORT_SYMBOL(__do_clear_user) 224 + ret $31, ($26), 1 # L0 .. .. .. : L U L U 225 + .end __clear_user 226 + EXPORT_SYMBOL(__clear_user)
+45 -59
arch/alpha/lib/ev6-copy_user.S
··· 12 12 * only _after_ a successful copy). There is also some rather minor 13 13 * exception setup stuff.. 14 14 * 15 - * NOTE! This is not directly C-callable, because the calling semantics are 16 - * different: 17 - * 18 - * Inputs: 19 - * length in $0 20 - * destination address in $6 21 - * source address in $7 22 - * return address in $28 23 - * 24 - * Outputs: 25 - * bytes left to copy in $0 26 - * 27 - * Clobbers: 28 - * $1,$2,$3,$4,$5,$6,$7 29 - * 30 15 * Much of the information about 21264 scheduling/coding comes from: 31 16 * Compiler Writer's Guide for the Alpha 21264 32 17 * abbreviated as 'CWG' in other comments here ··· 45 60 # Pipeline info: Slotting & Comments 46 61 __copy_user: 47 62 .prologue 0 48 - subq $0, 32, $1 # .. E .. .. : Is this going to be a small copy? 63 + andq $18, $18, $0 64 + subq $18, 32, $1 # .. E .. .. : Is this going to be a small copy? 49 65 beq $0, $zerolength # U .. .. .. : U L U L 50 66 51 - and $6,7,$3 # .. .. .. E : is leading dest misalignment 67 + and $16,7,$3 # .. .. .. E : is leading dest misalignment 52 68 ble $1, $onebyteloop # .. .. U .. : 1st branch : small amount of data 53 69 beq $3, $destaligned # .. U .. .. : 2nd (one cycle fetcher stall) 54 70 subq $3, 8, $3 # E .. .. .. : L U U L : trip counter ··· 59 73 * We know we have at least one trip through this loop 60 74 */ 61 75 $aligndest: 62 - EXI( ldbu $1,0($7) ) # .. .. .. L : Keep loads separate from stores 63 - addq $6,1,$6 # .. .. E .. : Section 3.8 in the CWG 76 + EXI( ldbu $1,0($17) ) # .. .. .. L : Keep loads separate from stores 77 + addq $16,1,$16 # .. .. E .. : Section 3.8 in the CWG 64 78 addq $3,1,$3 # .. E .. .. : 65 79 nop # E .. .. .. : U L U L 66 80 67 81 /* 68 - * the -1 is to compensate for the inc($6) done in a previous quadpack 82 + * the -1 is to compensate for the inc($16) done in a previous quadpack 69 83 * which allows us zero dependencies within either quadpack in the loop 70 84 */ 71 - EXO( stb $1,-1($6) ) # .. .. .. 
L : 72 - addq $7,1,$7 # .. .. E .. : Section 3.8 in the CWG 85 + EXO( stb $1,-1($16) ) # .. .. .. L : 86 + addq $17,1,$17 # .. .. E .. : Section 3.8 in the CWG 73 87 subq $0,1,$0 # .. E .. .. : 74 88 bne $3, $aligndest # U .. .. .. : U L U L 75 89 ··· 78 92 * If we arrived via branch, we have a minimum of 32 bytes 79 93 */ 80 94 $destaligned: 81 - and $7,7,$1 # .. .. .. E : Check _current_ source alignment 95 + and $17,7,$1 # .. .. .. E : Check _current_ source alignment 82 96 bic $0,7,$4 # .. .. E .. : number bytes as a quadword loop 83 - EXI( ldq_u $3,0($7) ) # .. L .. .. : Forward fetch for fallthrough code 97 + EXI( ldq_u $3,0($17) ) # .. L .. .. : Forward fetch for fallthrough code 84 98 beq $1,$quadaligned # U .. .. .. : U L U L 85 99 86 100 /* 87 - * In the worst case, we've just executed an ldq_u here from 0($7) 101 + * In the worst case, we've just executed an ldq_u here from 0($17) 88 102 * and we'll repeat it once if we take the branch 89 103 */ 90 104 91 105 /* Misaligned quadword loop - not unrolled. Leave it that way. */ 92 106 $misquad: 93 - EXI( ldq_u $2,8($7) ) # .. .. .. L : 107 + EXI( ldq_u $2,8($17) ) # .. .. .. L : 94 108 subq $4,8,$4 # .. .. E .. : 95 - extql $3,$7,$3 # .. U .. .. : 96 - extqh $2,$7,$1 # U .. .. .. : U U L L 109 + extql $3,$17,$3 # .. U .. .. : 110 + extqh $2,$17,$1 # U .. .. .. : U U L L 97 111 98 112 bis $3,$1,$1 # .. .. .. E : 99 - EXO( stq $1,0($6) ) # .. .. L .. : 100 - addq $7,8,$7 # .. E .. .. : 113 + EXO( stq $1,0($16) ) # .. .. L .. : 114 + addq $17,8,$17 # .. E .. .. : 101 115 subq $0,8,$0 # E .. .. .. : U L L U 102 116 103 - addq $6,8,$6 # .. .. .. E : 117 + addq $16,8,$16 # .. .. .. E : 104 118 bis $2,$2,$3 # .. .. E .. : 105 119 nop # .. E .. .. : 106 120 bne $4,$misquad # U .. .. .. : U L U L ··· 111 125 beq $0,$zerolength # U .. .. .. : U L U L 112 126 113 127 /* We know we have at least one trip through the byte loop */ 114 - EXI ( ldbu $2,0($7) ) # .. .. .. L : No loads in the same quad 115 - addq $6,1,$6 # .. 
.. E .. : as the store (Section 3.8 in CWG) 128 + EXI ( ldbu $2,0($17) ) # .. .. .. L : No loads in the same quad 129 + addq $16,1,$16 # .. .. E .. : as the store (Section 3.8 in CWG) 116 130 nop # .. E .. .. : 117 131 br $31, $dirtyentry # L0 .. .. .. : L U U L 118 132 /* Do the trailing byte loop load, then hop into the store part of the loop */ ··· 122 136 * Based upon the usage context, it's worth the effort to unroll this loop 123 137 * $0 - number of bytes to be moved 124 138 * $4 - number of bytes to move as quadwords 125 - * $6 is current destination address 126 - * $7 is current source address 139 + * $16 is current destination address 140 + * $17 is current source address 127 141 */ 128 142 $quadaligned: 129 143 subq $4, 32, $2 # .. .. .. E : do not unroll for small stuff ··· 141 155 * instruction memory hint instruction). 142 156 */ 143 157 $unroll4: 144 - EXI( ldq $1,0($7) ) # .. .. .. L 145 - EXI( ldq $2,8($7) ) # .. .. L .. 158 + EXI( ldq $1,0($17) ) # .. .. .. L 159 + EXI( ldq $2,8($17) ) # .. .. L .. 146 160 subq $4,32,$4 # .. E .. .. 147 161 nop # E .. .. .. : U U L L 148 162 149 - addq $7,16,$7 # .. .. .. E 150 - EXO( stq $1,0($6) ) # .. .. L .. 151 - EXO( stq $2,8($6) ) # .. L .. .. 163 + addq $17,16,$17 # .. .. .. E 164 + EXO( stq $1,0($16) ) # .. .. L .. 165 + EXO( stq $2,8($16) ) # .. L .. .. 152 166 subq $0,16,$0 # E .. .. .. : U L L U 153 167 154 - addq $6,16,$6 # .. .. .. E 155 - EXI( ldq $1,0($7) ) # .. .. L .. 156 - EXI( ldq $2,8($7) ) # .. L .. .. 168 + addq $16,16,$16 # .. .. .. E 169 + EXI( ldq $1,0($17) ) # .. .. L .. 170 + EXI( ldq $2,8($17) ) # .. L .. .. 157 171 subq $4, 32, $3 # E .. .. .. : U U L L : is there enough for another trip? 158 172 159 - EXO( stq $1,0($6) ) # .. .. .. L 160 - EXO( stq $2,8($6) ) # .. .. L .. 173 + EXO( stq $1,0($16) ) # .. .. .. L 174 + EXO( stq $2,8($16) ) # .. .. L .. 161 175 subq $0,16,$0 # .. E .. .. 162 - addq $7,16,$7 # E .. .. .. : U L L U 176 + addq $17,16,$17 # E .. .. .. 
: U L L U 163 177 164 178 nop # .. .. .. E 165 179 nop # .. .. E .. 166 - addq $6,16,$6 # .. E .. .. 180 + addq $16,16,$16 # .. E .. .. 167 181 bgt $3,$unroll4 # U .. .. .. : U L U L 168 182 169 183 nop ··· 172 186 beq $4, $noquads 173 187 174 188 $onequad: 175 - EXI( ldq $1,0($7) ) 189 + EXI( ldq $1,0($17) ) 176 190 subq $4,8,$4 177 - addq $7,8,$7 191 + addq $17,8,$17 178 192 nop 179 193 180 - EXO( stq $1,0($6) ) 194 + EXO( stq $1,0($16) ) 181 195 subq $0,8,$0 182 - addq $6,8,$6 196 + addq $16,8,$16 183 197 bne $4,$onequad 184 198 185 199 $noquads: ··· 193 207 * There's no point in doing a lot of complex alignment calculations to try to 194 208 * to quadword stuff for a small amount of data. 195 209 * $0 - remaining number of bytes left to copy 196 - * $6 - current dest addr 197 - * $7 - current source addr 210 + * $16 - current dest addr 211 + * $17 - current source addr 198 212 */ 199 213 200 214 $onebyteloop: 201 - EXI ( ldbu $2,0($7) ) # .. .. .. L : No loads in the same quad 202 - addq $6,1,$6 # .. .. E .. : as the store (Section 3.8 in CWG) 215 + EXI ( ldbu $2,0($17) ) # .. .. .. L : No loads in the same quad 216 + addq $16,1,$16 # .. .. E .. : as the store (Section 3.8 in CWG) 203 217 nop # .. E .. .. : 204 218 nop # E .. .. .. : U L U L 205 219 206 220 $dirtyentry: 207 221 /* 208 - * the -1 is to compensate for the inc($6) done in a previous quadpack 222 + * the -1 is to compensate for the inc($16) done in a previous quadpack 209 223 * which allows us zero dependencies within either quadpack in the loop 210 224 */ 211 - EXO ( stb $2,-1($6) ) # .. .. .. L : 212 - addq $7,1,$7 # .. .. E .. : quadpack as the load 225 + EXO ( stb $2,-1($16) ) # .. .. .. L : 226 + addq $17,1,$17 # .. .. E .. : quadpack as the load 213 227 subq $0,1,$0 # .. E .. .. : change count _after_ copy 214 228 bgt $0,$onebyteloop # U .. .. .. : U L U L 215 229 ··· 219 233 nop # .. .. .. E 220 234 nop # .. .. E .. 221 235 nop # .. E .. .. 222 - ret $31,($28),1 # L0 .. .. .. 
: L U L U 236 + ret $31,($26),1 # L0 .. .. .. : L U L U 223 237 224 238 .end __copy_user 225 239 EXPORT_SYMBOL(__copy_user)
+1
arch/arc/include/asm/Kbuild
··· 6 6 generic-y += div64.h 7 7 generic-y += emergency-restart.h 8 8 generic-y += errno.h 9 + generic-y += extable.h 9 10 generic-y += fb.h 10 11 generic-y += fcntl.h 11 12 generic-y += ftrace.h
+7 -18
arch/arc/include/asm/uaccess.h
··· 24 24 #ifndef _ASM_ARC_UACCESS_H 25 25 #define _ASM_ARC_UACCESS_H 26 26 27 - #include <linux/sched.h> 28 - #include <asm/errno.h> 29 27 #include <linux/string.h> /* for generic string functions */ 30 28 31 29 32 - #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) 30 + #define __kernel_ok (uaccess_kernel()) 33 31 34 32 /* 35 33 * Algorithmically, for __user_ok() we want do: ··· 168 170 169 171 170 172 static inline unsigned long 171 - __arc_copy_from_user(void *to, const void __user *from, unsigned long n) 173 + raw_copy_from_user(void *to, const void __user *from, unsigned long n) 172 174 { 173 175 long res = 0; 174 176 char val; ··· 394 396 return res; 395 397 } 396 398 397 - extern unsigned long slowpath_copy_to_user(void __user *to, const void *from, 398 - unsigned long n); 399 - 400 399 static inline unsigned long 401 - __arc_copy_to_user(void __user *to, const void *from, unsigned long n) 400 + raw_copy_to_user(void __user *to, const void *from, unsigned long n) 402 401 { 403 402 long res = 0; 404 403 char val; ··· 721 726 } 722 727 723 728 #ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE 724 - #define __copy_from_user(t, f, n) __arc_copy_from_user(t, f, n) 725 - #define __copy_to_user(t, f, n) __arc_copy_to_user(t, f, n) 729 + 730 + #define INLINE_COPY_TO_USER 731 + #define INLINE_COPY_FROM_USER 732 + 726 733 #define __clear_user(d, n) __arc_clear_user(d, n) 727 734 #define __strncpy_from_user(d, s, n) __arc_strncpy_from_user(d, s, n) 728 735 #define __strnlen_user(s, n) __arc_strnlen_user(s, n) 729 736 #else 730 - extern long arc_copy_from_user_noinline(void *to, const void __user * from, 731 - unsigned long n); 732 - extern long arc_copy_to_user_noinline(void __user *to, const void *from, 733 - unsigned long n); 734 737 extern unsigned long arc_clear_user_noinline(void __user *to, 735 738 unsigned long n); 736 739 extern long arc_strncpy_from_user_noinline (char *dst, const char __user *src, 737 740 long count); 738 741 extern long 
arc_strnlen_user_noinline(const char __user *src, long n); 739 742 740 - #define __copy_from_user(t, f, n) arc_copy_from_user_noinline(t, f, n) 741 - #define __copy_to_user(t, f, n) arc_copy_to_user_noinline(t, f, n) 742 743 #define __clear_user(d, n) arc_clear_user_noinline(d, n) 743 744 #define __strncpy_from_user(d, s, n) arc_strncpy_from_user_noinline(d, s, n) 744 745 #define __strnlen_user(s, n) arc_strnlen_user_noinline(s, n) ··· 742 751 #endif 743 752 744 753 #include <asm-generic/uaccess.h> 745 - 746 - extern int fixup_exception(struct pt_regs *regs); 747 754 748 755 #endif
-14
arch/arc/mm/extable.c
··· 28 28 29 29 #ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE 30 30 31 - long arc_copy_from_user_noinline(void *to, const void __user *from, 32 - unsigned long n) 33 - { 34 - return __arc_copy_from_user(to, from, n); 35 - } 36 - EXPORT_SYMBOL(arc_copy_from_user_noinline); 37 - 38 - long arc_copy_to_user_noinline(void __user *to, const void *from, 39 - unsigned long n) 40 - { 41 - return __arc_copy_to_user(to, from, n); 42 - } 43 - EXPORT_SYMBOL(arc_copy_to_user_noinline); 44 - 45 31 unsigned long arc_clear_user_noinline(void __user *to, 46 32 unsigned long n) 47 33 {
-1
arch/arm/Kconfig
··· 41 41 select HARDIRQS_SW_RESEND 42 42 select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT) 43 43 select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6 44 - select HAVE_ARCH_HARDENED_USERCOPY 45 44 select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU 46 45 select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU 47 46 select HAVE_ARCH_MMAP_RND_BITS if MMU
+1
arch/arm/include/asm/Kbuild
··· 7 7 generic-y += emergency-restart.h 8 8 generic-y += errno.h 9 9 generic-y += exec.h 10 + generic-y += extable.h 10 11 generic-y += ioctl.h 11 12 generic-y += ipcbuf.h 12 13 generic-y += irq_regs.h
+18 -73
arch/arm/include/asm/uaccess.h
··· 12 12 * User space memory access functions 13 13 */ 14 14 #include <linux/string.h> 15 - #include <linux/thread_info.h> 16 - #include <asm/errno.h> 17 15 #include <asm/memory.h> 18 16 #include <asm/domain.h> 19 17 #include <asm/unified.h> ··· 24 26 #define __put_user_unaligned __put_user 25 27 #endif 26 28 27 - #define VERIFY_READ 0 28 - #define VERIFY_WRITE 1 29 - 30 - /* 31 - * The exception table consists of pairs of addresses: the first is the 32 - * address of an instruction that is allowed to fault, and the second is 33 - * the address at which the program should continue. No registers are 34 - * modified, so it is entirely up to the continuation code to figure out 35 - * what to do. 36 - * 37 - * All the routines below use bits of fixup code that are out of line 38 - * with the main instruction path. This means when everything is well, 39 - * we don't even have to jump over them. Further, they do not intrude 40 - * on our cache or tlb entries. 41 - */ 42 - 43 - struct exception_table_entry 44 - { 45 - unsigned long insn, fixup; 46 - }; 47 - 48 - extern int fixup_exception(struct pt_regs *regs); 29 + #include <asm/extable.h> 49 30 50 31 /* 51 32 * These two functions allow hooking accesses to userspace to increase ··· 248 271 #define access_ok(type, addr, size) (__range_ok(addr, size) == 0) 249 272 250 273 #define user_addr_max() \ 251 - (segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs()) 274 + (uaccess_kernel() ? 
~0UL : get_fs()) 252 275 253 276 /* 254 277 * The "__xxx" versions of the user access functions do not verify the ··· 455 478 arm_copy_from_user(void *to, const void __user *from, unsigned long n); 456 479 457 480 static inline unsigned long __must_check 458 - __arch_copy_from_user(void *to, const void __user *from, unsigned long n) 481 + raw_copy_from_user(void *to, const void __user *from, unsigned long n) 459 482 { 460 483 unsigned int __ua_flags; 461 484 ··· 471 494 __copy_to_user_std(void __user *to, const void *from, unsigned long n); 472 495 473 496 static inline unsigned long __must_check 474 - __arch_copy_to_user(void __user *to, const void *from, unsigned long n) 497 + raw_copy_to_user(void __user *to, const void *from, unsigned long n) 475 498 { 476 499 #ifndef CONFIG_UACCESS_WITH_MEMCPY 477 500 unsigned int __ua_flags; ··· 499 522 } 500 523 501 524 #else 502 - #define __arch_copy_from_user(to, from, n) \ 503 - (memcpy(to, (void __force *)from, n), 0) 504 - #define __arch_copy_to_user(to, from, n) \ 505 - (memcpy((void __force *)to, from, n), 0) 525 + static inline unsigned long 526 + raw_copy_from_user(void *to, const void __user *from, unsigned long n) 527 + { 528 + memcpy(to, (const void __force *)from, n); 529 + return 0; 530 + } 531 + static inline unsigned long 532 + raw_copy_to_user(void __user *to, const void *from, unsigned long n) 533 + { 534 + memcpy((void __force *)to, from, n); 535 + return 0; 536 + } 506 537 #define __clear_user(addr, n) (memset((void __force *)addr, 0, n), 0) 507 538 #endif 508 - 509 - static inline unsigned long __must_check 510 - __copy_from_user(void *to, const void __user *from, unsigned long n) 511 - { 512 - check_object_size(to, n, false); 513 - return __arch_copy_from_user(to, from, n); 514 - } 515 - 516 - static inline unsigned long __must_check 517 - copy_from_user(void *to, const void __user *from, unsigned long n) 518 - { 519 - unsigned long res = n; 520 - 521 - check_object_size(to, n, false); 522 - 523 - if 
(likely(access_ok(VERIFY_READ, from, n))) 524 - res = __arch_copy_from_user(to, from, n); 525 - if (unlikely(res)) 526 - memset(to + (n - res), 0, res); 527 - return res; 528 - } 529 - 530 - static inline unsigned long __must_check 531 - __copy_to_user(void __user *to, const void *from, unsigned long n) 532 - { 533 - check_object_size(from, n, true); 534 - 535 - return __arch_copy_to_user(to, from, n); 536 - } 537 - 538 - static inline unsigned long __must_check 539 - copy_to_user(void __user *to, const void *from, unsigned long n) 540 - { 541 - check_object_size(from, n, true); 542 - 543 - if (access_ok(VERIFY_WRITE, to, n)) 544 - n = __arch_copy_to_user(to, from, n); 545 - return n; 546 - } 547 - 548 - #define __copy_to_user_inatomic __copy_to_user 549 - #define __copy_from_user_inatomic __copy_from_user 539 + #define INLINE_COPY_TO_USER 540 + #define INLINE_COPY_FROM_USER 550 541 551 542 static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) 552 543 {
+2 -2
arch/arm/lib/uaccess_with_memcpy.c
··· 90 90 unsigned long ua_flags; 91 91 int atomic; 92 92 93 - if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { 93 + if (uaccess_kernel()) { 94 94 memcpy((void *)to, from, n); 95 95 return 0; 96 96 } ··· 162 162 { 163 163 unsigned long ua_flags; 164 164 165 - if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { 165 + if (uaccess_kernel()) { 166 166 memset((void *)addr, 0, n); 167 167 return 0; 168 168 }
-1
arch/arm64/Kconfig
··· 60 60 select HAVE_ALIGNED_STRUCT_PAGE if SLUB 61 61 select HAVE_ARCH_AUDITSYSCALL 62 62 select HAVE_ARCH_BITREVERSE 63 - select HAVE_ARCH_HARDENED_USERCOPY 64 63 select HAVE_ARCH_HUGE_VMAP 65 64 select HAVE_ARCH_JUMP_LABEL 66 65 select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
+25
arch/arm64/include/asm/extable.h
··· 1 + #ifndef __ASM_EXTABLE_H 2 + #define __ASM_EXTABLE_H 3 + 4 + /* 5 + * The exception table consists of pairs of relative offsets: the first 6 + * is the relative offset to an instruction that is allowed to fault, 7 + * and the second is the relative offset at which the program should 8 + * continue. No registers are modified, so it is entirely up to the 9 + * continuation code to figure out what to do. 10 + * 11 + * All the routines below use bits of fixup code that are out of line 12 + * with the main instruction path. This means when everything is well, 13 + * we don't even have to jump over them. Further, they do not intrude 14 + * on our cache or tlb entries. 15 + */ 16 + 17 + struct exception_table_entry 18 + { 19 + int insn, fixup; 20 + }; 21 + 22 + #define ARCH_HAS_RELATIVE_EXTABLE 23 + 24 + extern int fixup_exception(struct pt_regs *regs); 25 + #endif
+6 -77
arch/arm64/include/asm/uaccess.h
··· 28 28 #include <linux/bitops.h> 29 29 #include <linux/kasan-checks.h> 30 30 #include <linux/string.h> 31 - #include <linux/thread_info.h> 32 31 33 32 #include <asm/cpufeature.h> 34 33 #include <asm/ptrace.h> 35 - #include <asm/errno.h> 36 34 #include <asm/memory.h> 37 35 #include <asm/compiler.h> 38 - 39 - #define VERIFY_READ 0 40 - #define VERIFY_WRITE 1 41 - 42 - /* 43 - * The exception table consists of pairs of relative offsets: the first 44 - * is the relative offset to an instruction that is allowed to fault, 45 - * and the second is the relative offset at which the program should 46 - * continue. No registers are modified, so it is entirely up to the 47 - * continuation code to figure out what to do. 48 - * 49 - * All the routines below use bits of fixup code that are out of line 50 - * with the main instruction path. This means when everything is well, 51 - * we don't even have to jump over them. Further, they do not intrude 52 - * on our cache or tlb entries. 53 - */ 54 - 55 - struct exception_table_entry 56 - { 57 - int insn, fixup; 58 - }; 59 - 60 - #define ARCH_HAS_RELATIVE_EXTABLE 61 - 62 - extern int fixup_exception(struct pt_regs *regs); 36 + #include <asm/extable.h> 63 37 64 38 #define KERNEL_DS (-1UL) 65 39 #define get_ds() (KERNEL_DS) ··· 331 357 }) 332 358 333 359 extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n); 360 + #define raw_copy_from_user __arch_copy_from_user 334 361 extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n); 335 - extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n); 362 + #define raw_copy_to_user __arch_copy_to_user 363 + extern unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, unsigned long n); 336 364 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); 337 - 338 - static inline unsigned 
long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n) 339 - { 340 - kasan_check_write(to, n); 341 - check_object_size(to, n, false); 342 - return __arch_copy_from_user(to, from, n); 343 - } 344 - 345 - static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n) 346 - { 347 - kasan_check_read(from, n); 348 - check_object_size(from, n, true); 349 - return __arch_copy_to_user(to, from, n); 350 - } 351 - 352 - static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) 353 - { 354 - unsigned long res = n; 355 - kasan_check_write(to, n); 356 - check_object_size(to, n, false); 357 - 358 - if (access_ok(VERIFY_READ, from, n)) { 359 - res = __arch_copy_from_user(to, from, n); 360 - } 361 - if (unlikely(res)) 362 - memset(to + (n - res), 0, res); 363 - return res; 364 - } 365 - 366 - static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) 367 - { 368 - kasan_check_read(from, n); 369 - check_object_size(from, n, true); 370 - 371 - if (access_ok(VERIFY_WRITE, to, n)) { 372 - n = __arch_copy_to_user(to, from, n); 373 - } 374 - return n; 375 - } 376 - 377 - static inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n) 378 - { 379 - if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n)) 380 - n = __copy_in_user(to, from, n); 381 - return n; 382 - } 383 - 384 - #define __copy_to_user_inatomic __copy_to_user 385 - #define __copy_from_user_inatomic __copy_from_user 365 + #define INLINE_COPY_TO_USER 366 + #define INLINE_COPY_FROM_USER 386 367 387 368 static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) 388 369 {
+1 -1
arch/arm64/kernel/arm64ksyms.c
··· 38 38 EXPORT_SYMBOL(__arch_copy_from_user); 39 39 EXPORT_SYMBOL(__arch_copy_to_user); 40 40 EXPORT_SYMBOL(__clear_user); 41 - EXPORT_SYMBOL(__copy_in_user); 41 + EXPORT_SYMBOL(raw_copy_in_user); 42 42 43 43 /* physical memory */ 44 44 EXPORT_SYMBOL(memstart_addr);
+2 -2
arch/arm64/lib/copy_in_user.S
··· 64 64 .endm 65 65 66 66 end .req x5 67 - ENTRY(__copy_in_user) 67 + ENTRY(raw_copy_in_user) 68 68 uaccess_enable_not_uao x3, x4 69 69 add end, x0, x2 70 70 #include "copy_template.S" 71 71 uaccess_disable_not_uao x3 72 72 mov x0, #0 73 73 ret 74 - ENDPROC(__copy_in_user) 74 + ENDPROC(raw_copy_in_user) 75 75 76 76 .section .fixup,"ax" 77 77 .align 2
+1
arch/avr32/include/asm/Kbuild
··· 5 5 generic-y += div64.h 6 6 generic-y += emergency-restart.h 7 7 generic-y += exec.h 8 + generic-y += extable.h 8 9 generic-y += futex.h 9 10 generic-y += irq_regs.h 10 11 generic-y += irq_work.h
+7 -32
arch/avr32/include/asm/uaccess.h
··· 8 8 #ifndef __ASM_AVR32_UACCESS_H 9 9 #define __ASM_AVR32_UACCESS_H 10 10 11 - #include <linux/errno.h> 12 - #include <linux/sched.h> 13 - 14 - #define VERIFY_READ 0 15 - #define VERIFY_WRITE 1 16 - 17 11 typedef struct { 18 12 unsigned int is_user_space; 19 13 } mm_segment_t; ··· 66 72 extern __kernel_size_t __copy_user(void *to, const void *from, 67 73 __kernel_size_t n); 68 74 69 - extern __kernel_size_t copy_to_user(void __user *to, const void *from, 70 - __kernel_size_t n); 71 - extern __kernel_size_t ___copy_from_user(void *to, const void __user *from, 72 - __kernel_size_t n); 73 - 74 - static inline __kernel_size_t __copy_to_user(void __user *to, const void *from, 75 - __kernel_size_t n) 75 + static inline unsigned long 76 + raw_copy_to_user(void __user *to, const void *from, unsigned long n) 76 77 { 77 78 return __copy_user((void __force *)to, from, n); 78 79 } 79 - static inline __kernel_size_t __copy_from_user(void *to, 80 - const void __user *from, 81 - __kernel_size_t n) 80 + static inline unsigned long 81 + raw_copy_from_user(void *to, const void __user *from, unsigned long n) 82 82 { 83 83 return __copy_user(to, (const void __force *)from, n); 84 84 } 85 - static inline __kernel_size_t copy_from_user(void *to, 86 - const void __user *from, 87 - __kernel_size_t n) 88 - { 89 - size_t res = ___copy_from_user(to, from, n); 90 - if (unlikely(res)) 91 - memset(to + (n - res), 0, res); 92 - return res; 93 - } 94 - 95 - #define __copy_to_user_inatomic __copy_to_user 96 - #define __copy_from_user_inatomic __copy_from_user 85 + #define INLINE_COPY_FROM_USER 86 + #define INLINE_COPY_TO_USER 97 87 98 88 /* 99 89 * put_user: - Write a simple value into user space. ··· 307 329 308 330 #define strlen_user(s) strnlen_user(s, ~0UL >> 1) 309 331 310 - struct exception_table_entry 311 - { 312 - unsigned long insn, fixup; 313 - }; 332 + #include <asm/extable.h> 314 333 315 334 #endif /* __ASM_AVR32_UACCESS_H */
-2
arch/avr32/kernel/avr32_ksyms.c
··· 36 36 /* 37 37 * Userspace access stuff. 38 38 */ 39 - EXPORT_SYMBOL(___copy_from_user); 40 - EXPORT_SYMBOL(copy_to_user); 41 39 EXPORT_SYMBOL(__copy_user); 42 40 EXPORT_SYMBOL(strncpy_from_user); 43 41 EXPORT_SYMBOL(__strncpy_from_user);
-15
arch/avr32/lib/copy_user.S
··· 23 23 */ 24 24 .text 25 25 .align 1 26 - .global ___copy_from_user 27 - .type ___copy_from_user, @function 28 - ___copy_from_user: 29 - branch_if_kernel r8, __copy_user 30 - ret_if_privileged r8, r11, r10, r10 31 - rjmp __copy_user 32 - .size ___copy_from_user, . - ___copy_from_user 33 - 34 - .global copy_to_user 35 - .type copy_to_user, @function 36 - copy_to_user: 37 - branch_if_kernel r8, __copy_user 38 - ret_if_privileged r8, r12, r10, r10 39 - .size copy_to_user, . - copy_to_user 40 - 41 26 .global __copy_user 42 27 .type __copy_user, @function 43 28 __copy_user:
+1
arch/blackfin/include/asm/Kbuild
··· 7 7 generic-y += div64.h 8 8 generic-y += emergency-restart.h 9 9 generic-y += errno.h 10 + generic-y += extable.h 10 11 generic-y += fb.h 11 12 generic-y += futex.h 12 13 generic-y += hw_irq.h
+5 -42
arch/blackfin/include/asm/uaccess.h
··· 12 12 /* 13 13 * User space memory access functions 14 14 */ 15 - #include <linux/sched.h> 16 15 #include <linux/mm.h> 17 16 #include <linux/string.h> 18 17 ··· 28 29 29 30 #define segment_eq(a, b) ((a) == (b)) 30 31 31 - #define VERIFY_READ 0 32 - #define VERIFY_WRITE 1 33 - 34 32 #define access_ok(type, addr, size) _access_ok((unsigned long)(addr), (size)) 35 33 36 34 /* ··· 42 46 extern int _access_ok(unsigned long addr, unsigned long size); 43 47 #endif 44 48 45 - /* 46 - * The exception table consists of pairs of addresses: the first is the 47 - * address of an instruction that is allowed to fault, and the second is 48 - * the address at which the program should continue. No registers are 49 - * modified, so it is entirely up to the continuation code to figure out 50 - * what to do. 51 - * 52 - * All the routines below use bits of fixup code that are out of line 53 - * with the main instruction path. This means when everything is well, 54 - * we don't even have to jump over them. Further, they do not intrude 55 - * on our cache or tlb entries. 56 - */ 57 - 58 - struct exception_table_entry { 59 - unsigned long insn, fixup; 60 - }; 49 + #include <asm/extable.h> 61 50 62 51 /* 63 52 * These are the main single-value transfer routines. 
They automatically ··· 144 163 : "a" (__ptr(ptr))); \ 145 164 }) 146 165 147 - #define __copy_to_user_inatomic __copy_to_user 148 - #define __copy_from_user_inatomic __copy_from_user 149 - 150 166 static inline unsigned long __must_check 151 - __copy_from_user(void *to, const void __user *from, unsigned long n) 167 + raw_copy_from_user(void *to, const void __user *from, unsigned long n) 152 168 { 153 169 memcpy(to, (const void __force *)from, n); 154 170 return 0; 155 171 } 156 172 157 173 static inline unsigned long __must_check 158 - __copy_to_user(void __user *to, const void *from, unsigned long n) 174 + raw_copy_to_user(void __user *to, const void *from, unsigned long n) 159 175 { 160 176 memcpy((void __force *)to, from, n); 161 177 SSYNC(); 162 178 return 0; 163 179 } 164 180 165 - static inline unsigned long __must_check 166 - copy_from_user(void *to, const void __user *from, unsigned long n) 167 - { 168 - if (likely(access_ok(VERIFY_READ, from, n))) 169 - return __copy_from_user(to, from, n); 170 - memset(to, 0, n); 171 - return n; 172 - } 173 - 174 - static inline unsigned long __must_check 175 - copy_to_user(void __user *to, const void *from, unsigned long n) 176 - { 177 - if (likely(access_ok(VERIFY_WRITE, to, n))) 178 - return __copy_to_user(to, from, n); 179 - return n; 180 - } 181 - 181 + #define INLINE_COPY_FROM_USER 182 + #define INLINE_COPY_TO_USER 182 183 /* 183 184 * Copy a null terminated string from userspace. 184 185 */
+1 -1
arch/blackfin/kernel/process.c
··· 370 370 /* Check that things do not wrap around */ 371 371 if (addr > ULONG_MAX - size) 372 372 return 0; 373 - if (segment_eq(get_fs(), KERNEL_DS)) 373 + if (uaccess_kernel()) 374 374 return 1; 375 375 #ifdef CONFIG_MTD_UCLINUX 376 376 if (1)
+1
arch/c6x/include/asm/Kbuild
··· 12 12 generic-y += emergency-restart.h 13 13 generic-y += errno.h 14 14 generic-y += exec.h 15 + generic-y += extable.h 15 16 generic-y += fb.h 16 17 generic-y += fcntl.h 17 18 generic-y += futex.h
+6 -13
arch/c6x/include/asm/uaccess.h
··· 13 13 #include <linux/compiler.h> 14 14 #include <linux/string.h> 15 15 16 - #ifdef CONFIG_ACCESS_CHECK 17 - #define __access_ok _access_ok 18 - #endif 19 - 20 16 /* 21 - * __copy_from_user/copy_to_user are based on ones in asm-generic/uaccess.h 22 - * 23 17 * C6X supports unaligned 32 and 64 bit loads and stores. 24 18 */ 25 - static inline __must_check long __copy_from_user(void *to, 26 - const void __user *from, unsigned long n) 19 + static inline __must_check unsigned long 20 + raw_copy_from_user(void *to, const void __user *from, unsigned long n) 27 21 { 28 22 u32 tmp32; 29 23 u64 tmp64; ··· 52 58 return 0; 53 59 } 54 60 55 - static inline __must_check long __copy_to_user(void __user *to, 56 - const void *from, unsigned long n) 61 + static inline __must_check unsigned long 62 + raw_copy_to_user(void __user *to, const void *from, unsigned long n) 57 63 { 58 64 u32 tmp32; 59 65 u64 tmp64; ··· 87 93 memcpy((void __force *)to, from, n); 88 94 return 0; 89 95 } 90 - 91 - #define __copy_to_user __copy_to_user 92 - #define __copy_from_user __copy_from_user 96 + #define INLINE_COPY_FROM_USER 97 + #define INLINE_COPY_TO_USER 93 98 94 99 extern int _access_ok(unsigned long addr, unsigned long size); 95 100 #ifdef CONFIG_ACCESS_CHECK
+1 -1
arch/c6x/kernel/sys_c6x.c
··· 23 23 if (!addr || addr > (0xffffffffUL - (size - 1))) 24 24 goto _bad_access; 25 25 26 - if (segment_eq(get_fs(), KERNEL_DS)) 26 + if (uaccess_kernel()) 27 27 return 1; 28 28 29 29 if (memory_start <= addr && (addr + size - 1) < memory_end)
+9 -22
arch/cris/arch-v10/lib/usercopy.c
··· 188 188 } 189 189 EXPORT_SYMBOL(__copy_user); 190 190 191 - /* Copy from user to kernel, zeroing the bytes that were inaccessible in 192 - userland. The return-value is the number of bytes that were 191 + /* Copy from user to kernel. The return-value is the number of bytes that were 193 192 inaccessible. */ 194 193 195 - unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, 194 + unsigned long __copy_user_in(void *pdst, const void __user *psrc, 196 195 unsigned long pn) 197 196 { 198 197 /* We want the parameters put in special registers. ··· 216 217 { 217 218 __asm_copy_from_user_1 (dst, src, retn); 218 219 n--; 220 + if (retn) 221 + goto exception; 219 222 } 220 223 221 224 if (((unsigned long) src & 2) && n >= 2) 222 225 { 223 226 __asm_copy_from_user_2 (dst, src, retn); 224 227 n -= 2; 228 + if (retn) 229 + goto exception; 225 230 } 226 - 227 - /* We only need one check after the unalignment-adjustments, because 228 - if both adjustments were done, either both or neither reference 229 - had an exception. */ 230 - if (retn != 0) 231 - goto copy_exception_bytes; 232 231 } 233 232 234 233 /* Decide which copying method to use. */ ··· 325 328 n -= 4; 326 329 327 330 if (retn) 328 - goto copy_exception_bytes; 331 + goto exception; 329 332 } 330 333 331 334 /* If we get here, there were no memory read faults. */ ··· 353 356 bytes. */ 354 357 return retn; 355 358 356 - copy_exception_bytes: 357 - /* We already have "retn" bytes cleared, and need to clear the 358 - remaining "n" bytes. A non-optimized simple byte-for-byte in-line 359 - memset is preferred here, since this isn't speed-critical code and 360 - we'd rather have this a leaf-function than calling memset. */ 361 - { 362 - char *endp; 363 - for (endp = dst + n; dst < endp; dst++) 364 - *dst = 0; 365 - } 366 - 359 + exception: 367 360 return retn + n; 368 361 } 369 - EXPORT_SYMBOL(__copy_user_zeroing); 362 + EXPORT_SYMBOL(__copy_user_in); 370 363 371 364 /* Zero userspace. 
*/ 372 365 unsigned long __do_clear_user(void __user *pto, unsigned long pn)
+9 -21
arch/cris/arch-v32/lib/usercopy.c
··· 156 156 } 157 157 EXPORT_SYMBOL(__copy_user); 158 158 159 - /* Copy from user to kernel, zeroing the bytes that were inaccessible in 160 - userland. The return-value is the number of bytes that were 159 + /* Copy from user to kernel. The return-value is the number of bytes that were 161 160 inaccessible. */ 162 - unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, 161 + unsigned long __copy_user_in(void *pdst, const void __user *psrc, 163 162 unsigned long pn) 164 163 { 165 164 /* We want the parameters put in special registers. ··· 183 184 { 184 185 __asm_copy_from_user_1 (dst, src, retn); 185 186 n--; 187 + if (retn != 0) 188 + goto exception; 186 189 } 187 190 188 191 if (((unsigned long) src & 2) && n >= 2) 189 192 { 190 193 __asm_copy_from_user_2 (dst, src, retn); 191 194 n -= 2; 195 + if (retn != 0) 196 + goto exception; 192 197 } 193 198 194 - /* We only need one check after the unalignment-adjustments, because 195 - if both adjustments were done, either both or neither reference 196 - had an exception. */ 197 - if (retn != 0) 198 - goto copy_exception_bytes; 199 199 } 200 200 201 201 /* Movem is dirt cheap. The overheap is low enough to always use the ··· 277 279 n -= 4; 278 280 279 281 if (retn) 280 - goto copy_exception_bytes; 282 + goto exception; 281 283 } 282 284 283 285 /* If we get here, there were no memory read faults. */ ··· 305 307 bytes. */ 306 308 return retn; 307 309 308 - copy_exception_bytes: 309 - /* We already have "retn" bytes cleared, and need to clear the 310 - remaining "n" bytes. A non-optimized simple byte-for-byte in-line 311 - memset is preferred here, since this isn't speed-critical code and 312 - we'd rather have this a leaf-function than calling memset. 
*/ 313 - { 314 - char *endp; 315 - for (endp = dst + n; dst < endp; dst++) 316 - *dst = 0; 317 - } 318 - 310 + exception: 319 311 return retn + n; 320 312 } 321 - EXPORT_SYMBOL(__copy_user_zeroing); 313 + EXPORT_SYMBOL(__copy_user_in); 322 314 323 315 /* Zero userspace. */ 324 316 unsigned long __do_clear_user(void __user *pto, unsigned long pn)
+18 -28
arch/cris/include/arch-v10/arch/uaccess.h
··· 172 172 __asm_copy_user_cont(to, from, ret, \ 173 173 " move.b [%1+],$r9\n" \ 174 174 "2: move.b $r9,[%0+]\n", \ 175 - "3: addq 1,%2\n" \ 176 - " clear.b [%0+]\n", \ 175 + "3: addq 1,%2\n", \ 177 176 " .dword 2b,3b\n") 178 177 179 178 #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ 180 179 __asm_copy_user_cont(to, from, ret, \ 181 180 " move.w [%1+],$r9\n" \ 182 181 "2: move.w $r9,[%0+]\n" COPY, \ 183 - "3: addq 2,%2\n" \ 184 - " clear.w [%0+]\n" FIXUP, \ 182 + "3: addq 2,%2\n" FIXUP, \ 185 183 " .dword 2b,3b\n" TENTRY) 186 184 187 185 #define __asm_copy_from_user_2(to, from, ret) \ ··· 189 191 __asm_copy_from_user_2x_cont(to, from, ret, \ 190 192 " move.b [%1+],$r9\n" \ 191 193 "4: move.b $r9,[%0+]\n", \ 192 - "5: addq 1,%2\n" \ 193 - " clear.b [%0+]\n", \ 194 + "5: addq 1,%2\n", \ 194 195 " .dword 4b,5b\n") 195 196 196 197 #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ 197 198 __asm_copy_user_cont(to, from, ret, \ 198 199 " move.d [%1+],$r9\n" \ 199 200 "2: move.d $r9,[%0+]\n" COPY, \ 200 - "3: addq 4,%2\n" \ 201 - " clear.d [%0+]\n" FIXUP, \ 201 + "3: addq 4,%2\n" FIXUP, \ 202 202 " .dword 2b,3b\n" TENTRY) 203 203 204 204 #define __asm_copy_from_user_4(to, from, ret) \ ··· 206 210 __asm_copy_from_user_4x_cont(to, from, ret, \ 207 211 " move.b [%1+],$r9\n" \ 208 212 "4: move.b $r9,[%0+]\n", \ 209 - "5: addq 1,%2\n" \ 210 - " clear.b [%0+]\n", \ 213 + "5: addq 1,%2\n", \ 211 214 " .dword 4b,5b\n") 212 215 213 216 #define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ ··· 214 219 " move.w [%1+],$r9\n" \ 215 220 "4: move.w $r9,[%0+]\n" COPY, \ 216 221 "5: addq 2,%2\n" \ 217 - " clear.w [%0+]\n" FIXUP, \ 222 + FIXUP, \ 218 223 " .dword 4b,5b\n" TENTRY) 219 224 220 225 #define __asm_copy_from_user_6(to, from, ret) \ ··· 224 229 __asm_copy_from_user_6x_cont(to, from, ret, \ 225 230 " move.b [%1+],$r9\n" \ 226 231 "6: move.b $r9,[%0+]\n", \ 227 - "7: addq 1,%2\n" \ 228 - " clear.b [%0+]\n", \ 
232 + "7: addq 1,%2\n", \ 229 233 " .dword 6b,7b\n") 230 234 231 235 #define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ ··· 232 238 " move.d [%1+],$r9\n" \ 233 239 "4: move.d $r9,[%0+]\n" COPY, \ 234 240 "5: addq 4,%2\n" \ 235 - " clear.d [%0+]\n" FIXUP, \ 241 + FIXUP, \ 236 242 " .dword 4b,5b\n" TENTRY) 237 243 238 244 #define __asm_copy_from_user_8(to, from, ret) \ ··· 242 248 __asm_copy_from_user_8x_cont(to, from, ret, \ 243 249 " move.b [%1+],$r9\n" \ 244 250 "6: move.b $r9,[%0+]\n", \ 245 - "7: addq 1,%2\n" \ 246 - " clear.b [%0+]\n", \ 251 + "7: addq 1,%2\n", \ 247 252 " .dword 6b,7b\n") 248 253 249 254 #define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ ··· 250 257 " move.w [%1+],$r9\n" \ 251 258 "6: move.w $r9,[%0+]\n" COPY, \ 252 259 "7: addq 2,%2\n" \ 253 - " clear.w [%0+]\n" FIXUP, \ 260 + FIXUP, \ 254 261 " .dword 6b,7b\n" TENTRY) 255 262 256 263 #define __asm_copy_from_user_10(to, from, ret) \ ··· 260 267 __asm_copy_from_user_10x_cont(to, from, ret, \ 261 268 " move.b [%1+],$r9\n" \ 262 269 "8: move.b $r9,[%0+]\n", \ 263 - "9: addq 1,%2\n" \ 264 - " clear.b [%0+]\n", \ 270 + "9: addq 1,%2\n", \ 265 271 " .dword 8b,9b\n") 266 272 267 273 #define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ ··· 268 276 " move.d [%1+],$r9\n" \ 269 277 "6: move.d $r9,[%0+]\n" COPY, \ 270 278 "7: addq 4,%2\n" \ 271 - " clear.d [%0+]\n" FIXUP, \ 279 + FIXUP, \ 272 280 " .dword 6b,7b\n" TENTRY) 273 281 274 282 #define __asm_copy_from_user_12(to, from, ret) \ ··· 278 286 __asm_copy_from_user_12x_cont(to, from, ret, \ 279 287 " move.b [%1+],$r9\n" \ 280 288 "8: move.b $r9,[%0+]\n", \ 281 - "9: addq 1,%2\n" \ 282 - " clear.b [%0+]\n", \ 289 + "9: addq 1,%2\n", \ 283 290 " .dword 8b,9b\n") 284 291 285 292 #define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ ··· 286 295 " move.w [%1+],$r9\n" \ 287 296 "8: move.w $r9,[%0+]\n" COPY, \ 288 297 "9: addq 2,%2\n" \ 289 - " clear.w [%0+]\n" 
FIXUP, \ 298 + FIXUP, \ 290 299 " .dword 8b,9b\n" TENTRY) 291 300 292 301 #define __asm_copy_from_user_14(to, from, ret) \ ··· 296 305 __asm_copy_from_user_14x_cont(to, from, ret, \ 297 306 " move.b [%1+],$r9\n" \ 298 307 "10: move.b $r9,[%0+]\n", \ 299 - "11: addq 1,%2\n" \ 300 - " clear.b [%0+]\n", \ 308 + "11: addq 1,%2\n", \ 301 309 " .dword 10b,11b\n") 302 310 303 311 #define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ ··· 304 314 " move.d [%1+],$r9\n" \ 305 315 "8: move.d $r9,[%0+]\n" COPY, \ 306 316 "9: addq 4,%2\n" \ 307 - " clear.d [%0+]\n" FIXUP, \ 317 + FIXUP, \ 308 318 " .dword 8b,9b\n" TENTRY) 309 319 310 320 #define __asm_copy_from_user_16(to, from, ret) \ ··· 315 325 " move.d [%1+],$r9\n" \ 316 326 "10: move.d $r9,[%0+]\n" COPY, \ 317 327 "11: addq 4,%2\n" \ 318 - " clear.d [%0+]\n" FIXUP, \ 328 + FIXUP, \ 319 329 " .dword 10b,11b\n" TENTRY) 320 330 321 331 #define __asm_copy_from_user_20(to, from, ret) \ ··· 326 336 " move.d [%1+],$r9\n" \ 327 337 "12: move.d $r9,[%0+]\n" COPY, \ 328 338 "13: addq 4,%2\n" \ 329 - " clear.d [%0+]\n" FIXUP, \ 339 + FIXUP, \ 330 340 " .dword 12b,13b\n" TENTRY) 331 341 332 342 #define __asm_copy_from_user_24(to, from, ret) \
+18 -36
arch/cris/include/arch-v32/arch/uaccess.h
··· 178 178 "2: move.b [%1+],$acr\n" \ 179 179 " move.b $acr,[%0+]\n", \ 180 180 "3: addq 1,%2\n" \ 181 - " jump 1b\n" \ 182 - " clear.b [%0+]\n", \ 181 + " jump 1b\n", \ 183 182 " .dword 2b,3b\n") 184 183 185 184 #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ ··· 188 189 " move.w $acr,[%0+]\n", \ 189 190 FIXUP \ 190 191 "3: addq 2,%2\n" \ 191 - " jump 1b\n" \ 192 - " clear.w [%0+]\n", \ 192 + " jump 1b\n", \ 193 193 TENTRY \ 194 194 " .dword 2b,3b\n") 195 195 ··· 199 201 __asm_copy_from_user_2x_cont(to, from, ret, \ 200 202 "4: move.b [%1+],$acr\n" \ 201 203 " move.b $acr,[%0+]\n", \ 202 - "5: addq 1,%2\n" \ 203 - " clear.b [%0+]\n", \ 204 + "5: addq 1,%2\n", \ 204 205 " .dword 4b,5b\n") 205 206 206 207 #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ ··· 209 212 " move.d $acr,[%0+]\n", \ 210 213 FIXUP \ 211 214 "3: addq 4,%2\n" \ 212 - " jump 1b\n" \ 213 - " clear.d [%0+]\n", \ 215 + " jump 1b\n", \ 214 216 TENTRY \ 215 217 " .dword 2b,3b\n") 216 218 ··· 220 224 __asm_copy_from_user_4x_cont(to, from, ret, \ 221 225 "4: move.b [%1+],$acr\n" \ 222 226 " move.b $acr,[%0+]\n", \ 223 - "5: addq 1,%2\n" \ 224 - " clear.b [%0+]\n", \ 227 + "5: addq 1,%2\n", \ 225 228 " .dword 4b,5b\n") 226 229 227 230 #define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ ··· 229 234 "4: move.w [%1+],$acr\n" \ 230 235 " move.w $acr,[%0+]\n", \ 231 236 FIXUP \ 232 - "5: addq 2,%2\n" \ 233 - " clear.w [%0+]\n", \ 237 + "5: addq 2,%2\n", \ 234 238 TENTRY \ 235 239 " .dword 4b,5b\n") 236 240 ··· 240 246 __asm_copy_from_user_6x_cont(to, from, ret, \ 241 247 "6: move.b [%1+],$acr\n" \ 242 248 " move.b $acr,[%0+]\n", \ 243 - "7: addq 1,%2\n" \ 244 - " clear.b [%0+]\n", \ 249 + "7: addq 1,%2\n", \ 245 250 " .dword 6b,7b\n") 246 251 247 252 #define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ ··· 249 256 "4: move.d [%1+],$acr\n" \ 250 257 " move.d $acr,[%0+]\n", \ 251 258 FIXUP \ 252 - "5: addq 
4,%2\n" \ 253 - " clear.d [%0+]\n", \ 259 + "5: addq 4,%2\n", \ 254 260 TENTRY \ 255 261 " .dword 4b,5b\n") 256 262 ··· 260 268 __asm_copy_from_user_8x_cont(to, from, ret, \ 261 269 "6: move.b [%1+],$acr\n" \ 262 270 " move.b $acr,[%0+]\n", \ 263 - "7: addq 1,%2\n" \ 264 - " clear.b [%0+]\n", \ 271 + "7: addq 1,%2\n", \ 265 272 " .dword 6b,7b\n") 266 273 267 274 #define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ ··· 269 278 "6: move.w [%1+],$acr\n" \ 270 279 " move.w $acr,[%0+]\n", \ 271 280 FIXUP \ 272 - "7: addq 2,%2\n" \ 273 - " clear.w [%0+]\n", \ 281 + "7: addq 2,%2\n", \ 274 282 TENTRY \ 275 283 " .dword 6b,7b\n") 276 284 ··· 280 290 __asm_copy_from_user_10x_cont(to, from, ret, \ 281 291 "8: move.b [%1+],$acr\n" \ 282 292 " move.b $acr,[%0+]\n", \ 283 - "9: addq 1,%2\n" \ 284 - " clear.b [%0+]\n", \ 293 + "9: addq 1,%2\n", \ 285 294 " .dword 8b,9b\n") 286 295 287 296 #define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ ··· 289 300 "6: move.d [%1+],$acr\n" \ 290 301 " move.d $acr,[%0+]\n", \ 291 302 FIXUP \ 292 - "7: addq 4,%2\n" \ 293 - " clear.d [%0+]\n", \ 303 + "7: addq 4,%2\n", \ 294 304 TENTRY \ 295 305 " .dword 6b,7b\n") 296 306 ··· 300 312 __asm_copy_from_user_12x_cont(to, from, ret, \ 301 313 "8: move.b [%1+],$acr\n" \ 302 314 " move.b $acr,[%0+]\n", \ 303 - "9: addq 1,%2\n" \ 304 - " clear.b [%0+]\n", \ 315 + "9: addq 1,%2\n", \ 305 316 " .dword 8b,9b\n") 306 317 307 318 #define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ ··· 309 322 "8: move.w [%1+],$acr\n" \ 310 323 " move.w $acr,[%0+]\n", \ 311 324 FIXUP \ 312 - "9: addq 2,%2\n" \ 313 - " clear.w [%0+]\n", \ 325 + "9: addq 2,%2\n", \ 314 326 TENTRY \ 315 327 " .dword 8b,9b\n") 316 328 ··· 320 334 __asm_copy_from_user_14x_cont(to, from, ret, \ 321 335 "10: move.b [%1+],$acr\n" \ 322 336 " move.b $acr,[%0+]\n", \ 323 - "11: addq 1,%2\n" \ 324 - " clear.b [%0+]\n", \ 337 + "11: addq 1,%2\n", \ 325 338 " .dword 10b,11b\n") 326 339 
327 340 #define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ ··· 329 344 "8: move.d [%1+],$acr\n" \ 330 345 " move.d $acr,[%0+]\n", \ 331 346 FIXUP \ 332 - "9: addq 4,%2\n" \ 333 - " clear.d [%0+]\n", \ 347 + "9: addq 4,%2\n", \ 334 348 TENTRY \ 335 349 " .dword 8b,9b\n") 336 350 ··· 342 358 "10: move.d [%1+],$acr\n" \ 343 359 " move.d $acr,[%0+]\n", \ 344 360 FIXUP \ 345 - "11: addq 4,%2\n" \ 346 - " clear.d [%0+]\n", \ 361 + "11: addq 4,%2\n", \ 347 362 TENTRY \ 348 363 " .dword 10b,11b\n") 349 364 ··· 355 372 "12: move.d [%1+],$acr\n" \ 356 373 " move.d $acr,[%0+]\n", \ 357 374 FIXUP \ 358 - "13: addq 4,%2\n" \ 359 - " clear.d [%0+]\n", \ 375 + "13: addq 4,%2\n", \ 360 376 TENTRY \ 361 377 " .dword 12b,13b\n") 362 378
+1
arch/cris/include/asm/Kbuild
··· 9 9 generic-y += div64.h 10 10 generic-y += errno.h 11 11 generic-y += exec.h 12 + generic-y += extable.h 12 13 generic-y += emergency-restart.h 13 14 generic-y += fcntl.h 14 15 generic-y += futex.h
+12 -65
arch/cris/include/asm/uaccess.h
··· 15 15 #ifndef _CRIS_UACCESS_H 16 16 #define _CRIS_UACCESS_H 17 17 18 - #ifndef __ASSEMBLY__ 19 - #include <linux/sched.h> 20 - #include <linux/errno.h> 21 18 #include <asm/processor.h> 22 19 #include <asm/page.h> 23 - 24 - #define VERIFY_READ 0 25 - #define VERIFY_WRITE 1 26 20 27 21 /* 28 22 * The fs value determines whether argument validity checking should be ··· 43 49 44 50 #define segment_eq(a, b) ((a).seg == (b).seg) 45 51 46 - #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) 52 + #define __kernel_ok (uaccess_kernel()) 47 53 #define __user_ok(addr, size) \ 48 54 (((size) <= TASK_SIZE) && ((addr) <= TASK_SIZE-(size))) 49 55 #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) 50 56 #define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size)) 51 57 52 58 #include <arch/uaccess.h> 53 - 54 - /* 55 - * The exception table consists of pairs of addresses: the first is the 56 - * address of an instruction that is allowed to fault, and the second is 57 - * the address at which the program should continue. No registers are 58 - * modified, so it is entirely up to the continuation code to figure out 59 - * what to do. 60 - * 61 - * All the routines below use bits of fixup code that are out of line 62 - * with the main instruction path. This means when everything is well, 63 - * we don't even have to jump over them. Further, they do not intrude 64 - * on our cache or tlb entries. 65 - */ 66 - 67 - struct exception_table_entry { 68 - unsigned long insn, fixup; 69 - }; 59 + #include <asm/extable.h> 70 60 71 61 /* 72 62 * These are the main single-value transfer routines. 
They automatically ··· 169 191 live in lib/usercopy.c */ 170 192 171 193 extern unsigned long __copy_user(void __user *to, const void *from, unsigned long n); 172 - extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n); 194 + extern unsigned long __copy_user_in(void *to, const void __user *from, unsigned long n); 173 195 extern unsigned long __do_clear_user(void __user *to, unsigned long n); 174 196 175 197 static inline long ··· 236 258 else if (n == 24) 237 259 __asm_copy_from_user_24(to, from, ret); 238 260 else 239 - ret = __copy_user_zeroing(to, from, n); 261 + ret = __copy_user_in(to, from, n); 240 262 241 263 return ret; 242 264 } ··· 336 358 return __do_clear_user(to, n); 337 359 } 338 360 339 - static inline size_t copy_from_user(void *to, const void __user *from, size_t n) 361 + static inline unsigned long 362 + raw_copy_from_user(void *to, const void __user *from, unsigned long n) 340 363 { 341 - if (unlikely(!access_ok(VERIFY_READ, from, n))) { 342 - memset(to, 0, n); 343 - return n; 344 - } 345 364 if (__builtin_constant_p(n)) 346 365 return __constant_copy_from_user(to, from, n); 347 366 else 348 - return __copy_user_zeroing(to, from, n); 367 + return __copy_user_in(to, from, n); 349 368 } 350 369 351 - static inline size_t copy_to_user(void __user *to, const void *from, size_t n) 370 + static inline unsigned long 371 + raw_copy_to_user(void __user *to, const void *from, unsigned long n) 352 372 { 353 - if (unlikely(!access_ok(VERIFY_WRITE, to, n))) 354 - return n; 355 373 if (__builtin_constant_p(n)) 356 374 return __constant_copy_to_user(to, from, n); 357 375 else 358 376 return __copy_user(to, from, n); 359 377 } 360 378 361 - /* We let the __ versions of copy_from/to_user inline, because they're often 362 - * used in fast paths and have only a small space overhead. 
363 - */ 379 + #define INLINE_COPY_FROM_USER 380 + #define INLINE_COPY_TO_USER 364 381 365 382 static inline unsigned long 366 - __generic_copy_from_user_nocheck(void *to, const void __user *from, 367 - unsigned long n) 368 - { 369 - return __copy_user_zeroing(to, from, n); 370 - } 371 - 372 - static inline unsigned long 373 - __generic_copy_to_user_nocheck(void __user *to, const void *from, 374 - unsigned long n) 375 - { 376 - return __copy_user(to, from, n); 377 - } 378 - 379 - static inline unsigned long 380 - __generic_clear_user_nocheck(void __user *to, unsigned long n) 383 + __clear_user(void __user *to, unsigned long n) 381 384 { 382 385 return __do_clear_user(to, n); 383 386 } 384 387 385 - /* without checking */ 386 - 387 - #define __copy_to_user(to, from, n) \ 388 - __generic_copy_to_user_nocheck((to), (from), (n)) 389 - #define __copy_from_user(to, from, n) \ 390 - __generic_copy_from_user_nocheck((to), (from), (n)) 391 - #define __copy_to_user_inatomic __copy_to_user 392 - #define __copy_from_user_inatomic __copy_from_user 393 - #define __clear_user(to, n) __generic_clear_user_nocheck((to), (n)) 394 - 395 388 #define strlen_user(str) strnlen_user((str), 0x7ffffffe) 396 - 397 - #endif /* __ASSEMBLY__ */ 398 389 399 390 #endif /* _CRIS_UACCESS_H */
+1
arch/frv/include/asm/Kbuild
··· 1 1 2 2 generic-y += clkdev.h 3 3 generic-y += exec.h 4 + generic-y += extable.h 4 5 generic-y += irq_work.h 5 6 generic-y += mcs_spinlock.h 6 7 generic-y += mm-arch-hooks.h
+26 -62
arch/frv/include/asm/uaccess.h
··· 15 15 /* 16 16 * User space memory access functions 17 17 */ 18 - #include <linux/sched.h> 19 18 #include <linux/mm.h> 20 19 #include <asm/segment.h> 21 20 #include <asm/sections.h> 21 + #include <asm/extable.h> 22 22 23 23 #define __ptr(x) ((unsigned long __force *)(x)) 24 - 25 - #define VERIFY_READ 0 26 - #define VERIFY_WRITE 1 27 24 28 25 /* 29 26 * check that a range of addresses falls within the current address limit ··· 59 62 60 63 #define access_ok(type,addr,size) (__range_ok((void __user *)(addr), (size)) == 0) 61 64 #define __access_ok(addr,size) (__range_ok((addr), (size)) == 0) 62 - 63 - /* 64 - * The exception table consists of pairs of addresses: the first is the 65 - * address of an instruction that is allowed to fault, and the second is 66 - * the address at which the program should continue. No registers are 67 - * modified, so it is entirely up to the continuation code to figure out 68 - * what to do. 69 - * 70 - * All the routines below use bits of fixup code that are out of line 71 - * with the main instruction path. This means when everything is well, 72 - * we don't even have to jump over them. Further, they do not intrude 73 - * on our cache or tlb entries. 74 - */ 75 - struct exception_table_entry 76 - { 77 - unsigned long insn, fixup; 78 - }; 79 - 80 - /* Returns 0 if exception not found and fixup otherwise. 
*/ 81 - extern unsigned long search_exception_table(unsigned long); 82 65 83 66 84 67 /* ··· 233 256 /* 234 257 * 235 258 */ 259 + 236 260 #define ____force(x) (__force void *)(void __user *)(x) 237 261 #ifdef CONFIG_MMU 238 262 extern long __memset_user(void *dst, unsigned long count); 239 263 extern long __memcpy_user(void *dst, const void *src, unsigned long count); 240 264 241 265 #define __clear_user(dst,count) __memset_user(____force(dst), (count)) 242 - #define __copy_from_user_inatomic(to, from, n) __memcpy_user((to), ____force(from), (n)) 243 - #define __copy_to_user_inatomic(to, from, n) __memcpy_user(____force(to), (from), (n)) 244 266 245 267 #else 246 268 247 269 #define __clear_user(dst,count) (memset(____force(dst), 0, (count)), 0) 248 - #define __copy_from_user_inatomic(to, from, n) (memcpy((to), ____force(from), (n)), 0) 249 - #define __copy_to_user_inatomic(to, from, n) (memcpy(____force(to), (from), (n)), 0) 250 270 251 271 #endif 272 + 273 + static inline unsigned long 274 + raw_copy_from_user(void *to, const void __user *from, unsigned long n) 275 + { 276 + #ifdef CONFIG_MMU 277 + return __memcpy_user(to, (__force const void *)from, n); 278 + #else 279 + memcpy(to, (__force const void *)from, n); 280 + return 0; 281 + #endif 282 + } 283 + 284 + static inline unsigned long 285 + raw_copy_to_user(void __user *to, const void *from, unsigned long n) 286 + { 287 + #ifdef CONFIG_MMU 288 + return __memcpy_user((__force void *)to, from, n); 289 + #else 290 + memcpy((__force void *)to, from, n); 291 + return 0; 292 + #endif 293 + } 294 + #define INLINE_COPY_TO_USER 295 + #define INLINE_COPY_FROM_USER 252 296 253 297 static inline unsigned long __must_check 254 298 clear_user(void __user *to, unsigned long n) ··· 279 281 return n; 280 282 } 281 283 282 - static inline unsigned long __must_check 283 - __copy_to_user(void __user *to, const void *from, unsigned long n) 284 - { 285 - might_fault(); 286 - return __copy_to_user_inatomic(to, from, n); 287 - } 
288 - 289 - static inline unsigned long 290 - __copy_from_user(void *to, const void __user *from, unsigned long n) 291 - { 292 - might_fault(); 293 - return __copy_from_user_inatomic(to, from, n); 294 - } 295 - 296 - static inline long copy_from_user(void *to, const void __user *from, unsigned long n) 297 - { 298 - unsigned long ret = n; 299 - 300 - if (likely(__access_ok(from, n))) 301 - ret = __copy_from_user(to, from, n); 302 - 303 - if (unlikely(ret != 0)) 304 - memset(to + (n - ret), 0, ret); 305 - 306 - return ret; 307 - } 308 - 309 - static inline long copy_to_user(void __user *to, const void *from, unsigned long n) 310 - { 311 - return likely(__access_ok(to, n)) ? __copy_to_user(to, from, n) : n; 312 - } 313 - 314 284 extern long strncpy_from_user(char *dst, const char __user *src, long count); 315 285 extern long strnlen_user(const char __user *src, long count); 316 286 317 287 #define strlen_user(str) strnlen_user(str, 32767) 318 - 319 - extern unsigned long search_exception_table(unsigned long addr); 320 288 321 289 #endif /* _ASM_UACCESS_H */
+1 -6
arch/frv/kernel/traps.c
··· 360 360 siginfo_t info; 361 361 362 362 #ifdef CONFIG_MMU 363 - unsigned long fixup; 364 - 365 - fixup = search_exception_table(__frame->pc); 366 - if (fixup) { 367 - __frame->pc = fixup; 363 + if (fixup_exception(__frame)) 368 364 return; 369 - } 370 365 #endif 371 366 372 367 die_if_kernel("-- Memory Access Exception --\n"
+13 -14
arch/frv/mm/extable.c
··· 10 10 extern const void __memcpy_end, __memcpy_user_error_lr, __memcpy_user_error_handler; 11 11 extern spinlock_t modlist_lock; 12 12 13 - 14 - /*****************************************************************************/ 15 - /* 16 - * see if there's a fixup handler available to deal with a kernel fault 17 - */ 18 - unsigned long search_exception_table(unsigned long pc) 13 + int fixup_exception(struct pt_regs *regs) 19 14 { 20 15 const struct exception_table_entry *extab; 16 + unsigned long pc = regs->pc; 21 17 22 18 /* determine if the fault lay during a memcpy_user or a memset_user */ 23 - if (__frame->lr == (unsigned long) &__memset_user_error_lr && 19 + if (regs->lr == (unsigned long) &__memset_user_error_lr && 24 20 (unsigned long) &memset <= pc && pc < (unsigned long) &__memset_end 25 21 ) { 26 22 /* the fault occurred in a protected memset 27 23 * - we search for the return address (in LR) instead of the program counter 28 24 * - it was probably during a clear_user() 29 25 */ 30 - return (unsigned long) &__memset_user_error_handler; 26 + regs->pc = (unsigned long) &__memset_user_error_handler; 27 + return 1; 31 28 } 32 29 33 - if (__frame->lr == (unsigned long) &__memcpy_user_error_lr && 30 + if (regs->lr == (unsigned long) &__memcpy_user_error_lr && 34 31 (unsigned long) &memcpy <= pc && pc < (unsigned long) &__memcpy_end 35 32 ) { 36 33 /* the fault occurred in a protected memset 37 34 * - we search for the return address (in LR) instead of the program counter 38 35 * - it was probably during a copy_to/from_user() 39 36 */ 40 - return (unsigned long) &__memcpy_user_error_handler; 37 + regs->pc = (unsigned long) &__memcpy_user_error_handler; 38 + return 1; 41 39 } 42 40 43 41 extab = search_exception_tables(pc); 44 - if (extab) 45 - return extab->fixup; 42 + if (extab) { 43 + regs->pc = extab->fixup; 44 + return 1; 45 + } 46 46 47 47 return 0; 48 - 49 - } /* end search_exception_table() */ 48 + }
+2 -4
arch/frv/mm/fault.c
··· 33 33 { 34 34 struct vm_area_struct *vma; 35 35 struct mm_struct *mm; 36 - unsigned long _pme, lrai, lrad, fixup; 36 + unsigned long _pme, lrai, lrad; 37 37 unsigned long flags = 0; 38 38 siginfo_t info; 39 39 pgd_t *pge; ··· 201 201 202 202 no_context: 203 203 /* are we prepared to handle this kernel fault? */ 204 - if ((fixup = search_exception_table(__frame->pc)) != 0) { 205 - __frame->pc = fixup; 204 + if (fixup_exception(__frame)) 206 205 return; 207 - } 208 206 209 207 /* 210 208 * Oops. The kernel tried to access some bad page. We'll have to
+1 -1
arch/h8300/include/asm/Kbuild
··· 13 13 generic-y += emergency-restart.h 14 14 generic-y += errno.h 15 15 generic-y += exec.h 16 + generic-y += extable.h 16 17 generic-y += fb.h 17 18 generic-y += fcntl.h 18 19 generic-y += ftrace.h ··· 69 68 generic-y += trace_clock.h 70 69 generic-y += topology.h 71 70 generic-y += types.h 72 - generic-y += uaccess.h 73 71 generic-y += ucontext.h 74 72 generic-y += unaligned.h 75 73 generic-y += vga.h
+54
arch/h8300/include/asm/uaccess.h
··· 1 + #ifndef _ASM_UACCESS_H 2 + #define _ASM_UACCESS_H 3 + 4 + #include <linux/string.h> 5 + 6 + static inline __must_check unsigned long 7 + raw_copy_from_user(void *to, const void __user * from, unsigned long n) 8 + { 9 + if (__builtin_constant_p(n)) { 10 + switch(n) { 11 + case 1: 12 + *(u8 *)to = *(u8 __force *)from; 13 + return 0; 14 + case 2: 15 + *(u16 *)to = *(u16 __force *)from; 16 + return 0; 17 + case 4: 18 + *(u32 *)to = *(u32 __force *)from; 19 + return 0; 20 + } 21 + } 22 + 23 + memcpy(to, (const void __force *)from, n); 24 + return 0; 25 + } 26 + 27 + static inline __must_check unsigned long 28 + raw_copy_to_user(void __user *to, const void *from, unsigned long n) 29 + { 30 + if (__builtin_constant_p(n)) { 31 + switch(n) { 32 + case 1: 33 + *(u8 __force *)to = *(u8 *)from; 34 + return 0; 35 + case 2: 36 + *(u16 __force *)to = *(u16 *)from; 37 + return 0; 38 + case 4: 39 + *(u32 __force *)to = *(u32 *)from; 40 + return 0; 41 + default: 42 + break; 43 + } 44 + } 45 + 46 + memcpy((void __force *)to, from, n); 47 + return 0; 48 + } 49 + #define INLINE_COPY_FROM_USER 50 + #define INLINE_COPY_TO_USER 51 + 52 + #include <asm-generic/uaccess.h> 53 + 54 + #endif
+1
arch/hexagon/include/asm/Kbuild
··· 11 11 generic-y += div64.h 12 12 generic-y += emergency-restart.h 13 13 generic-y += errno.h 14 + generic-y += extable.h 14 15 generic-y += fb.h 15 16 generic-y += fcntl.h 16 17 generic-y += ftrace.h
+10 -16
arch/hexagon/include/asm/uaccess.h
··· 23 23 /* 24 24 * User space memory access functions 25 25 */ 26 - #include <linux/sched.h> 27 26 #include <linux/mm.h> 28 27 #include <asm/segment.h> 29 28 #include <asm/sections.h> ··· 49 50 * reasonably simple and not *too* slow. After all, we've got the 50 51 * MMU for backup. 51 52 */ 52 - #define VERIFY_READ 0 53 - #define VERIFY_WRITE 1 54 53 55 54 #define __access_ok(addr, size) \ 56 55 ((get_fs().seg == KERNEL_DS.seg) || \ ··· 65 68 */ 66 69 67 70 /* Assembly somewhat optimized copy routines */ 68 - unsigned long __copy_from_user_hexagon(void *to, const void __user *from, 71 + unsigned long raw_copy_from_user(void *to, const void __user *from, 69 72 unsigned long n); 70 - unsigned long __copy_to_user_hexagon(void __user *to, const void *from, 73 + unsigned long raw_copy_to_user(void __user *to, const void *from, 71 74 unsigned long n); 72 - 73 - #define __copy_from_user(to, from, n) __copy_from_user_hexagon(to, from, n) 74 - #define __copy_to_user(to, from, n) __copy_to_user_hexagon(to, from, n) 75 - 76 - /* 77 - * XXX todo: some additonal performance gain is possible by 78 - * implementing __copy_to/from_user_inatomic, which is much 79 - * like __copy_to/from_user, but performs slightly less checking. 80 - */ 75 + #define INLINE_COPY_FROM_USER 76 + #define INLINE_COPY_TO_USER 81 77 82 78 __kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count); 83 79 #define __clear_user(a, s) __clear_user_hexagon((a), (s)) ··· 97 107 return -EFAULT; 98 108 99 109 if (res > n) { 100 - copy_from_user(dst, src, n); 110 + long left = raw_copy_from_user(dst, src, n); 111 + if (unlikely(left)) 112 + memset(dst + (n - left), 0, left); 101 113 return n; 102 114 } else { 103 - copy_from_user(dst, src, res); 115 + long left = raw_copy_from_user(dst, src, res); 116 + if (unlikely(left)) 117 + memset(dst + (res - left), 0, left); 104 118 return res-1; 105 119 } 106 120 }
+2 -2
arch/hexagon/kernel/hexagon_ksyms.c
··· 25 25 26 26 /* Additional functions */ 27 27 EXPORT_SYMBOL(__clear_user_hexagon); 28 - EXPORT_SYMBOL(__copy_from_user_hexagon); 29 - EXPORT_SYMBOL(__copy_to_user_hexagon); 28 + EXPORT_SYMBOL(raw_copy_from_user); 29 + EXPORT_SYMBOL(raw_copy_to_user); 30 30 EXPORT_SYMBOL(__iounmap); 31 31 EXPORT_SYMBOL(__strnlen_user); 32 32 EXPORT_SYMBOL(__vmgetie);
+1 -1
arch/hexagon/mm/copy_from_user.S
··· 44 44 #define bytes r2 45 45 #define loopcount r5 46 46 47 - #define FUNCNAME __copy_from_user_hexagon 47 + #define FUNCNAME raw_copy_from_user 48 48 #include "copy_user_template.S" 49 49 50 50 /* LOAD FAULTS from COPY_FROM_USER */
+1 -1
arch/hexagon/mm/copy_to_user.S
··· 43 43 #define bytes r2 44 44 #define loopcount r5 45 45 46 - #define FUNCNAME __copy_to_user_hexagon 46 + #define FUNCNAME raw_copy_to_user 47 47 #include "copy_user_template.S" 48 48 49 49 /* STORE FAULTS from COPY_TO_USER */
-1
arch/ia64/Kconfig
··· 52 52 select MODULES_USE_ELF_RELA 53 53 select ARCH_USE_CMPXCHG_LOCKREF 54 54 select HAVE_ARCH_AUDITSYSCALL 55 - select HAVE_ARCH_HARDENED_USERCOPY 56 55 default y 57 56 help 58 57 The Itanium Processor Family is Intel's 64-bit successor to
+11
arch/ia64/include/asm/extable.h
··· 1 + #ifndef _ASM_IA64_EXTABLE_H 2 + #define _ASM_IA64_EXTABLE_H 3 + 4 + #define ARCH_HAS_RELATIVE_EXTABLE 5 + 6 + struct exception_table_entry { 7 + int insn; /* location-relative address of insn this fixup is for */ 8 + int fixup; /* location-relative continuation addr.; if bit 2 is set, r9 is set to 0 */ 9 + }; 10 + 11 + #endif
+27 -75
arch/ia64/include/asm/uaccess.h
··· 33 33 */ 34 34 35 35 #include <linux/compiler.h> 36 - #include <linux/errno.h> 37 - #include <linux/sched.h> 38 36 #include <linux/page-flags.h> 39 37 #include <linux/mm.h> 40 38 41 39 #include <asm/intrinsics.h> 42 40 #include <asm/pgtable.h> 43 41 #include <asm/io.h> 42 + #include <asm/extable.h> 44 43 45 44 /* 46 45 * For historical reasons, the following macros are grossly misnamed: 47 46 */ 48 47 #define KERNEL_DS ((mm_segment_t) { ~0UL }) /* cf. access_ok() */ 49 48 #define USER_DS ((mm_segment_t) { TASK_SIZE-1 }) /* cf. access_ok() */ 50 - 51 - #define VERIFY_READ 0 52 - #define VERIFY_WRITE 1 53 49 54 50 #define get_ds() (KERNEL_DS) 55 51 #define get_fs() (current_thread_info()->addr_limit) ··· 59 63 * address TASK_SIZE is never valid. We also need to make sure that the address doesn't 60 64 * point inside the virtually mapped linear page table. 61 65 */ 62 - #define __access_ok(addr, size, segment) \ 63 - ({ \ 64 - __chk_user_ptr(addr); \ 65 - (likely((unsigned long) (addr) <= (segment).seg) \ 66 - && ((segment).seg == KERNEL_DS.seg \ 67 - || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \ 68 - }) 69 - #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs()) 66 + static inline int __access_ok(const void __user *p, unsigned long size) 67 + { 68 + unsigned long addr = (unsigned long)p; 69 + unsigned long seg = get_fs().seg; 70 + return likely(addr <= seg) && 71 + (seg == KERNEL_DS.seg || likely(REGION_OFFSET(addr) < RGN_MAP_LIMIT)); 72 + } 73 + #define access_ok(type, addr, size) __access_ok((addr), (size)) 70 74 71 75 /* 72 76 * These are the main single-value transfer routines. 
They automatically ··· 76 80 * (a) re-use the arguments for side effects (sizeof/typeof is ok) 77 81 * (b) require any knowledge of processes at this stage 78 82 */ 79 - #define put_user(x, ptr) __put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)), get_fs()) 80 - #define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)), get_fs()) 83 + #define put_user(x, ptr) __put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr))) 84 + #define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr))) 81 85 82 86 /* 83 87 * The "__xxx" versions do not do address space checking, useful when ··· 180 184 * could clobber r8 and r9 (among others). Thus, be careful not to evaluate it while 181 185 * using r8/r9. 182 186 */ 183 - #define __do_get_user(check, x, ptr, size, segment) \ 187 + #define __do_get_user(check, x, ptr, size) \ 184 188 ({ \ 185 189 const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ 186 190 __typeof__ (size) __gu_size = (size); \ 187 191 long __gu_err = -EFAULT; \ 188 192 unsigned long __gu_val = 0; \ 189 - if (!check || __access_ok(__gu_ptr, size, segment)) \ 193 + if (!check || __access_ok(__gu_ptr, size)) \ 190 194 switch (__gu_size) { \ 191 195 case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break; \ 192 196 case 2: __get_user_size(__gu_val, __gu_ptr, 2, __gu_err); break; \ ··· 198 202 __gu_err; \ 199 203 }) 200 204 201 - #define __get_user_nocheck(x, ptr, size) __do_get_user(0, x, ptr, size, KERNEL_DS) 202 - #define __get_user_check(x, ptr, size, segment) __do_get_user(1, x, ptr, size, segment) 205 + #define __get_user_nocheck(x, ptr, size) __do_get_user(0, x, ptr, size) 206 + #define __get_user_check(x, ptr, size) __do_get_user(1, x, ptr, size) 203 207 204 208 extern void __put_user_unknown (void); 205 209 ··· 207 211 * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which 208 212 * could clobber r8 (among others). Thus, be careful not to evaluate them while using r8. 
209 213 */ 210 - #define __do_put_user(check, x, ptr, size, segment) \ 214 + #define __do_put_user(check, x, ptr, size) \ 211 215 ({ \ 212 216 __typeof__ (x) __pu_x = (x); \ 213 217 __typeof__ (*(ptr)) __user *__pu_ptr = (ptr); \ 214 218 __typeof__ (size) __pu_size = (size); \ 215 219 long __pu_err = -EFAULT; \ 216 220 \ 217 - if (!check || __access_ok(__pu_ptr, __pu_size, segment)) \ 221 + if (!check || __access_ok(__pu_ptr, __pu_size)) \ 218 222 switch (__pu_size) { \ 219 223 case 1: __put_user_size(__pu_x, __pu_ptr, 1, __pu_err); break; \ 220 224 case 2: __put_user_size(__pu_x, __pu_ptr, 2, __pu_err); break; \ ··· 225 229 __pu_err; \ 226 230 }) 227 231 228 - #define __put_user_nocheck(x, ptr, size) __do_put_user(0, x, ptr, size, KERNEL_DS) 229 - #define __put_user_check(x, ptr, size, segment) __do_put_user(1, x, ptr, size, segment) 232 + #define __put_user_nocheck(x, ptr, size) __do_put_user(0, x, ptr, size) 233 + #define __put_user_check(x, ptr, size) __do_put_user(1, x, ptr, size) 230 234 231 235 /* 232 236 * Complex access routines ··· 235 239 unsigned long count); 236 240 237 241 static inline unsigned long 238 - __copy_to_user (void __user *to, const void *from, unsigned long count) 242 + raw_copy_to_user(void __user *to, const void *from, unsigned long count) 239 243 { 240 - check_object_size(from, count, true); 241 - 242 244 return __copy_user(to, (__force void __user *) from, count); 243 245 } 244 246 245 247 static inline unsigned long 246 - __copy_from_user (void *to, const void __user *from, unsigned long count) 248 + raw_copy_from_user(void *to, const void __user *from, unsigned long count) 247 249 { 248 - check_object_size(to, count, false); 249 - 250 250 return __copy_user((__force void __user *) to, from, count); 251 251 } 252 252 253 - #define __copy_to_user_inatomic __copy_to_user 254 - #define __copy_from_user_inatomic __copy_from_user 255 - #define copy_to_user(to, from, n) \ 256 - ({ \ 257 - void __user *__cu_to = (to); \ 258 - const void 
*__cu_from = (from); \ 259 - long __cu_len = (n); \ 260 - \ 261 - if (__access_ok(__cu_to, __cu_len, get_fs())) { \ 262 - check_object_size(__cu_from, __cu_len, true); \ 263 - __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \ 264 - } \ 265 - __cu_len; \ 266 - }) 267 - 268 - static inline unsigned long 269 - copy_from_user(void *to, const void __user *from, unsigned long n) 270 - { 271 - check_object_size(to, n, false); 272 - if (likely(__access_ok(from, n, get_fs()))) 273 - n = __copy_user((__force void __user *) to, from, n); 274 - else 275 - memset(to, 0, n); 276 - return n; 277 - } 278 - 279 - #define __copy_in_user(to, from, size) __copy_user((to), (from), (size)) 280 - 281 - static inline unsigned long 282 - copy_in_user (void __user *to, const void __user *from, unsigned long n) 283 - { 284 - if (likely(access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))) 285 - n = __copy_user(to, from, n); 286 - return n; 287 - } 253 + #define INLINE_COPY_FROM_USER 254 + #define INLINE_COPY_TO_USER 288 255 289 256 extern unsigned long __do_clear_user (void __user *, unsigned long); 290 257 ··· 256 297 #define clear_user(to, n) \ 257 298 ({ \ 258 299 unsigned long __cu_len = (n); \ 259 - if (__access_ok(to, __cu_len, get_fs())) \ 300 + if (__access_ok(to, __cu_len)) \ 260 301 __cu_len = __do_clear_user(to, __cu_len); \ 261 302 __cu_len; \ 262 303 }) ··· 272 313 ({ \ 273 314 const char __user * __sfu_from = (from); \ 274 315 long __sfu_ret = -EFAULT; \ 275 - if (__access_ok(__sfu_from, 0, get_fs())) \ 316 + if (__access_ok(__sfu_from, 0)) \ 276 317 __sfu_ret = __strncpy_from_user((to), __sfu_from, (n)); \ 277 318 __sfu_ret; \ 278 319 }) ··· 284 325 ({ \ 285 326 const char __user *__su_str = (str); \ 286 327 unsigned long __su_ret = 0; \ 287 - if (__access_ok(__su_str, 0, get_fs())) \ 328 + if (__access_ok(__su_str, 0)) \ 288 329 __su_ret = __strlen_user(__su_str); \ 289 330 __su_ret; \ 290 331 }) ··· 300 341 ({ \ 301 342 const char 
__user *__su_str = (str); \ 302 343 unsigned long __su_ret = 0; \ 303 - if (__access_ok(__su_str, 0, get_fs())) \ 344 + if (__access_ok(__su_str, 0)) \ 304 345 __su_ret = __strnlen_user(__su_str, len); \ 305 346 __su_ret; \ 306 347 }) 307 - 308 - #define ARCH_HAS_RELATIVE_EXTABLE 309 - 310 - struct exception_table_entry { 311 - int insn; /* location-relative address of insn this fixup is for */ 312 - int fixup; /* location-relative continuation addr.; if bit 2 is set, r9 is set to 0 */ 313 - }; 314 348 315 349 #define ARCH_HAS_TRANSLATE_MEM_PTR 1 316 350 static __inline__ void *
+1 -12
arch/ia64/lib/memcpy_mck.S
··· 556 556 #define D r22 557 557 #define F r28 558 558 559 - #define memset_arg0 r32 560 - #define memset_arg2 r33 561 - 562 559 #define saved_retval loc0 563 560 #define saved_rtlink loc1 564 561 #define saved_pfs_stack loc2 ··· 619 622 * (faulting_addr - orig_dst) -> len to faulting st address 620 623 * B = (cur_dst - orig_dst) -> len copied so far 621 624 * C = A - B -> len need to be copied 622 - * D = orig_len - A -> len need to be zeroed 625 + * D = orig_len - A -> len need to be left alone 623 626 */ 624 627 (p6) sub A = F, saved_in0 625 628 (p7) sub A = F, saved_in1 ··· 635 638 sub D = saved_in2, A 636 639 ;; 637 640 cmp.gt p8,p0=C,r0 // more than 1 byte? 638 - add memset_arg0=saved_in0, A 639 - (p6) mov memset_arg2=0 // copy_to_user should not call memset 640 - (p7) mov memset_arg2=D // copy_from_user need to have kbuf zeroed 641 641 mov r8=0 642 642 mov saved_retval = D 643 643 mov saved_rtlink = b0 ··· 646 652 ;; 647 653 648 654 add saved_retval=saved_retval,r8 // above might return non-zero value 649 - cmp.gt p8,p0=memset_arg2,r0 // more than 1 byte? 650 - mov out0=memset_arg0 // *s 651 - mov out1=r0 // c 652 - mov out2=memset_arg2 // n 653 - (p8) br.call.sptk.few b0=memset 654 655 ;; 655 656 mov retval=saved_retval
+4 -1
arch/ia64/mm/extable.c
··· 5 5 * David Mosberger-Tang <davidm@hpl.hp.com> 6 6 */ 7 7 8 - #include <linux/uaccess.h> 8 + #include <asm/ptrace.h> 9 + #include <asm/extable.h> 10 + #include <asm/errno.h> 11 + #include <asm/processor.h> 9 12 10 13 void 11 14 ia64_handle_exception (struct pt_regs *regs, const struct exception_table_entry *e)
+1
arch/m32r/include/asm/Kbuild
··· 2 2 generic-y += clkdev.h 3 3 generic-y += current.h 4 4 generic-y += exec.h 5 + generic-y += extable.h 5 6 generic-y += irq_work.h 6 7 generic-y += kvm_para.h 7 8 generic-y += mcs_spinlock.h
+12 -183
arch/m32r/include/asm/uaccess.h
··· 11 11 /* 12 12 * User space memory access functions 13 13 */ 14 - #include <linux/errno.h> 15 - #include <linux/thread_info.h> 16 14 #include <asm/page.h> 17 15 #include <asm/setup.h> 18 - 19 - #define VERIFY_READ 0 20 - #define VERIFY_WRITE 1 16 + #include <linux/prefetch.h> 21 17 22 18 /* 23 19 * The fs value determines whether argument validity checking should be ··· 110 114 } 111 115 #endif /* CONFIG_MMU */ 112 116 113 - /* 114 - * The exception table consists of pairs of addresses: the first is the 115 - * address of an instruction that is allowed to fault, and the second is 116 - * the address at which the program should continue. No registers are 117 - * modified, so it is entirely up to the continuation code to figure out 118 - * what to do. 119 - * 120 - * All the routines below use bits of fixup code that are out of line 121 - * with the main instruction path. This means when everything is well, 122 - * we don't even have to jump over them. Further, they do not intrude 123 - * on our cache or tlb entries. 124 - */ 125 - 126 - struct exception_table_entry 127 - { 128 - unsigned long insn, fixup; 129 - }; 130 - 131 - extern int fixup_exception(struct pt_regs *regs); 117 + #include <asm/extable.h> 132 118 133 119 /* 134 120 * These are the main single-value transfer routines. 
They automatically ··· 461 483 : "r14", "memory"); \ 462 484 } while (0) 463 485 464 - #define __copy_user_zeroing(to, from, size) \ 465 - do { \ 466 - unsigned long __dst, __src, __c; \ 467 - __asm__ __volatile__ ( \ 468 - " mv r14, %0\n" \ 469 - " or r14, %1\n" \ 470 - " beq %0, %1, 9f\n" \ 471 - " beqz %2, 9f\n" \ 472 - " and3 r14, r14, #3\n" \ 473 - " bnez r14, 2f\n" \ 474 - " and3 %2, %2, #3\n" \ 475 - " beqz %3, 2f\n" \ 476 - " addi %0, #-4 ; word_copy \n" \ 477 - " .fillinsn\n" \ 478 - "0: ld r14, @%1+\n" \ 479 - " addi %3, #-1\n" \ 480 - " .fillinsn\n" \ 481 - "1: st r14, @+%0\n" \ 482 - " bnez %3, 0b\n" \ 483 - " beqz %2, 9f\n" \ 484 - " addi %0, #4\n" \ 485 - " .fillinsn\n" \ 486 - "2: ldb r14, @%1 ; byte_copy \n" \ 487 - " .fillinsn\n" \ 488 - "3: stb r14, @%0\n" \ 489 - " addi %1, #1\n" \ 490 - " addi %2, #-1\n" \ 491 - " addi %0, #1\n" \ 492 - " bnez %2, 2b\n" \ 493 - " .fillinsn\n" \ 494 - "9:\n" \ 495 - ".section .fixup,\"ax\"\n" \ 496 - " .balign 4\n" \ 497 - "5: addi %3, #1\n" \ 498 - " addi %1, #-4\n" \ 499 - " .fillinsn\n" \ 500 - "6: slli %3, #2\n" \ 501 - " add %2, %3\n" \ 502 - " addi %0, #4\n" \ 503 - " .fillinsn\n" \ 504 - "7: ldi r14, #0 ; store zero \n" \ 505 - " .fillinsn\n" \ 506 - "8: addi %2, #-1\n" \ 507 - " stb r14, @%0 ; ACE? \n" \ 508 - " addi %0, #1\n" \ 509 - " bnez %2, 8b\n" \ 510 - " seth r14, #high(9b)\n" \ 511 - " or3 r14, r14, #low(9b)\n" \ 512 - " jmp r14\n" \ 513 - ".previous\n" \ 514 - ".section __ex_table,\"a\"\n" \ 515 - " .balign 4\n" \ 516 - " .long 0b,6b\n" \ 517 - " .long 1b,5b\n" \ 518 - " .long 2b,7b\n" \ 519 - " .long 3b,7b\n" \ 520 - ".previous\n" \ 521 - : "=&r" (__dst), "=&r" (__src), "=&r" (size), \ 522 - "=&r" (__c) \ 523 - : "0" (to), "1" (from), "2" (size), "3" (size / 4) \ 524 - : "r14", "memory"); \ 525 - } while (0) 526 - 527 - 528 486 /* We let the __ versions of copy_from/to_user inline, because they're often 529 487 * used in fast paths and have only a small space overhead. 
530 488 */ 531 - static inline unsigned long __generic_copy_from_user_nocheck(void *to, 532 - const void __user *from, unsigned long n) 489 + static inline unsigned long 490 + raw_copy_from_user(void *to, const void __user *from, unsigned long n) 533 491 { 534 - __copy_user_zeroing(to, from, n); 535 - return n; 536 - } 537 - 538 - static inline unsigned long __generic_copy_to_user_nocheck(void __user *to, 539 - const void *from, unsigned long n) 540 - { 492 + prefetchw(to); 541 493 __copy_user(to, from, n); 542 494 return n; 543 495 } 544 496 545 - unsigned long __generic_copy_to_user(void __user *, const void *, unsigned long); 546 - unsigned long __generic_copy_from_user(void *, const void __user *, unsigned long); 547 - 548 - /** 549 - * __copy_to_user: - Copy a block of data into user space, with less checking. 550 - * @to: Destination address, in user space. 551 - * @from: Source address, in kernel space. 552 - * @n: Number of bytes to copy. 553 - * 554 - * Context: User context only. This function may sleep if pagefaults are 555 - * enabled. 556 - * 557 - * Copy data from kernel space to user space. Caller must check 558 - * the specified block with access_ok() before calling this function. 559 - * 560 - * Returns number of bytes that could not be copied. 561 - * On success, this will be zero. 562 - */ 563 - #define __copy_to_user(to, from, n) \ 564 - __generic_copy_to_user_nocheck((to), (from), (n)) 565 - 566 - #define __copy_to_user_inatomic __copy_to_user 567 - #define __copy_from_user_inatomic __copy_from_user 568 - 569 - /** 570 - * copy_to_user: - Copy a block of data into user space. 571 - * @to: Destination address, in user space. 572 - * @from: Source address, in kernel space. 573 - * @n: Number of bytes to copy. 574 - * 575 - * Context: User context only. This function may sleep if pagefaults are 576 - * enabled. 577 - * 578 - * Copy data from kernel space to user space. 579 - * 580 - * Returns number of bytes that could not be copied. 
581 - * On success, this will be zero. 582 - */ 583 - #define copy_to_user(to, from, n) \ 584 - ({ \ 585 - might_fault(); \ 586 - __generic_copy_to_user((to), (from), (n)); \ 587 - }) 588 - 589 - /** 590 - * __copy_from_user: - Copy a block of data from user space, with less checking. * @to: Destination address, in kernel space. 591 - * @from: Source address, in user space. 592 - * @n: Number of bytes to copy. 593 - * 594 - * Context: User context only. This function may sleep if pagefaults are 595 - * enabled. 596 - * 597 - * Copy data from user space to kernel space. Caller must check 598 - * the specified block with access_ok() before calling this function. 599 - * 600 - * Returns number of bytes that could not be copied. 601 - * On success, this will be zero. 602 - * 603 - * If some data could not be copied, this function will pad the copied 604 - * data to the requested size using zero bytes. 605 - */ 606 - #define __copy_from_user(to, from, n) \ 607 - __generic_copy_from_user_nocheck((to), (from), (n)) 608 - 609 - /** 610 - * copy_from_user: - Copy a block of data from user space. 611 - * @to: Destination address, in kernel space. 612 - * @from: Source address, in user space. 613 - * @n: Number of bytes to copy. 614 - * 615 - * Context: User context only. This function may sleep if pagefaults are 616 - * enabled. 617 - * 618 - * Copy data from user space to kernel space. 619 - * 620 - * Returns number of bytes that could not be copied. 621 - * On success, this will be zero. 622 - * 623 - * If some data could not be copied, this function will pad the copied 624 - * data to the requested size using zero bytes. 
625 - */ 626 - #define copy_from_user(to, from, n) \ 627 - ({ \ 628 - might_fault(); \ 629 - __generic_copy_from_user((to), (from), (n)); \ 630 - }) 497 + static inline unsigned long 498 + raw_copy_to_user(void __user *to, const void *from, unsigned long n) 499 + { 500 + prefetch(from); 501 + __copy_user(to, from, n); 502 + return n; 503 + } 631 504 632 505 long __must_check strncpy_from_user(char *dst, const char __user *src, 633 506 long count);
-2
arch/m32r/kernel/m32r_ksyms.c
··· 26 26 EXPORT_SYMBOL(__strncpy_from_user); 27 27 EXPORT_SYMBOL(clear_user); 28 28 EXPORT_SYMBOL(__clear_user); 29 - EXPORT_SYMBOL(__generic_copy_from_user); 30 - EXPORT_SYMBOL(__generic_copy_to_user); 31 29 EXPORT_SYMBOL(strnlen_user); 32 30 33 31 #ifdef CONFIG_SMP
-21
arch/m32r/lib/usercopy.c
··· 11 11 #include <linux/thread_info.h> 12 12 #include <linux/uaccess.h> 13 13 14 - unsigned long 15 - __generic_copy_to_user(void __user *to, const void *from, unsigned long n) 16 - { 17 - prefetch(from); 18 - if (access_ok(VERIFY_WRITE, to, n)) 19 - __copy_user(to,from,n); 20 - return n; 21 - } 22 - 23 - unsigned long 24 - __generic_copy_from_user(void *to, const void __user *from, unsigned long n) 25 - { 26 - prefetchw(to); 27 - if (access_ok(VERIFY_READ, from, n)) 28 - __copy_user_zeroing(to,from,n); 29 - else 30 - memset(to, 0, n); 31 - return n; 32 - } 33 - 34 - 35 14 /* 36 15 * Copy a null terminated string from userspace. 37 16 */
+1
arch/m68k/include/asm/Kbuild
··· 5 5 generic-y += emergency-restart.h 6 6 generic-y += errno.h 7 7 generic-y += exec.h 8 + generic-y += extable.h 8 9 generic-y += futex.h 9 10 generic-y += hw_irq.h 10 11 generic-y += ioctl.h
-10
arch/m68k/include/asm/processor.h
··· 122 122 wrusp(usp); 123 123 } 124 124 125 - #ifdef CONFIG_MMU 126 - extern int handle_kernel_fault(struct pt_regs *regs); 127 - #else 128 - static inline int handle_kernel_fault(struct pt_regs *regs) 129 - { 130 - /* Any fault in kernel is fatal on non-mmu */ 131 - return 0; 132 - } 133 - #endif 134 - 135 125 /* Forward declaration, a strange C thing */ 136 126 struct task_struct; 137 127
+1
arch/m68k/include/asm/uaccess.h
··· 4 4 #include <asm/uaccess_mm.h> 5 5 #endif 6 6 7 + #include <asm/extable.h> 7 8 #ifdef CONFIG_CPU_HAS_NO_UNALIGNED 8 9 #include <asm-generic/uaccess-unaligned.h> 9 10 #else
+49 -54
arch/m68k/include/asm/uaccess_mm.h
··· 5 5 * User space memory access functions 6 6 */ 7 7 #include <linux/compiler.h> 8 - #include <linux/errno.h> 9 8 #include <linux/types.h> 10 - #include <linux/sched.h> 11 9 #include <asm/segment.h> 12 - 13 - #define VERIFY_READ 0 14 - #define VERIFY_WRITE 1 15 10 16 11 /* We let the MMU do all checking */ 17 12 static inline int access_ok(int type, const void __user *addr, ··· 30 35 #else 31 36 #define MOVES "move" 32 37 #endif 33 - 34 - /* 35 - * The exception table consists of pairs of addresses: the first is the 36 - * address of an instruction that is allowed to fault, and the second is 37 - * the address at which the program should continue. No registers are 38 - * modified, so it is entirely up to the continuation code to figure out 39 - * what to do. 40 - * 41 - * All the routines below use bits of fixup code that are out of line 42 - * with the main instruction path. This means when everything is well, 43 - * we don't even have to jump over them. Further, they do not intrude 44 - * on our cache or tlb entries. 
45 - */ 46 - 47 - struct exception_table_entry 48 - { 49 - unsigned long insn, fixup; 50 - }; 51 38 52 39 extern int __put_user_bad(void); 53 40 extern int __get_user_bad(void); ··· 179 202 unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n); 180 203 unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n); 181 204 182 - #define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\ 205 + #define __suffix0 206 + #define __suffix1 b 207 + #define __suffix2 w 208 + #define __suffix4 l 209 + 210 + #define ____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\ 183 211 asm volatile ("\n" \ 184 212 "1: "MOVES"."#s1" (%2)+,%3\n" \ 185 213 " move."#s1" %3,(%1)+\n" \ 214 + " .ifnc \""#s2"\",\"\"\n" \ 186 215 "2: "MOVES"."#s2" (%2)+,%3\n" \ 187 216 " move."#s2" %3,(%1)+\n" \ 188 217 " .ifnc \""#s3"\",\"\"\n" \ 189 218 "3: "MOVES"."#s3" (%2)+,%3\n" \ 190 219 " move."#s3" %3,(%1)+\n" \ 191 220 " .endif\n" \ 221 + " .endif\n" \ 192 222 "4:\n" \ 193 223 " .section __ex_table,\"a\"\n" \ 194 224 " .align 4\n" \ 195 225 " .long 1b,10f\n" \ 226 + " .ifnc \""#s2"\",\"\"\n" \ 196 227 " .long 2b,20f\n" \ 197 228 " .ifnc \""#s3"\",\"\"\n" \ 198 229 " .long 3b,30f\n" \ 230 + " .endif\n" \ 199 231 " .endif\n" \ 200 232 " .previous\n" \ 201 233 "\n" \ 202 234 " .section .fixup,\"ax\"\n" \ 203 235 " .even\n" \ 204 - "10: clr."#s1" (%1)+\n" \ 205 - "20: clr."#s2" (%1)+\n" \ 236 + "10: addq.l #"#n1",%0\n" \ 237 + " .ifnc \""#s2"\",\"\"\n" \ 238 + "20: addq.l #"#n2",%0\n" \ 206 239 " .ifnc \""#s3"\",\"\"\n" \ 207 - "30: clr."#s3" (%1)+\n" \ 240 + "30: addq.l #"#n3",%0\n" \ 208 241 " .endif\n" \ 209 - " moveq.l #"#n",%0\n" \ 242 + " .endif\n" \ 210 243 " jra 4b\n" \ 211 244 " .previous\n" \ 212 245 : "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp) \ 213 246 : : "memory") 247 + 248 + #define ___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\ 249 + 
____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3) 250 + #define __constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3) \ 251 + ___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, \ 252 + __suffix##n1, __suffix##n2, __suffix##n3) 214 253 215 254 static __always_inline unsigned long 216 255 __constant_copy_from_user(void *to, const void __user *from, unsigned long n) ··· 235 242 236 243 switch (n) { 237 244 case 1: 238 - __get_user_asm(res, *(u8 *)to, (u8 __user *)from, u8, b, d, 1); 245 + __constant_copy_from_user_asm(res, to, from, tmp, 1, 0, 0); 239 246 break; 240 247 case 2: 241 - __get_user_asm(res, *(u16 *)to, (u16 __user *)from, u16, w, r, 2); 248 + __constant_copy_from_user_asm(res, to, from, tmp, 2, 0, 0); 242 249 break; 243 250 case 3: 244 - __constant_copy_from_user_asm(res, to, from, tmp, 3, w, b,); 251 + __constant_copy_from_user_asm(res, to, from, tmp, 2, 1, 0); 245 252 break; 246 253 case 4: 247 - __get_user_asm(res, *(u32 *)to, (u32 __user *)from, u32, l, r, 4); 254 + __constant_copy_from_user_asm(res, to, from, tmp, 4, 0, 0); 248 255 break; 249 256 case 5: 250 - __constant_copy_from_user_asm(res, to, from, tmp, 5, l, b,); 257 + __constant_copy_from_user_asm(res, to, from, tmp, 4, 1, 0); 251 258 break; 252 259 case 6: 253 - __constant_copy_from_user_asm(res, to, from, tmp, 6, l, w,); 260 + __constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 0); 254 261 break; 255 262 case 7: 256 - __constant_copy_from_user_asm(res, to, from, tmp, 7, l, w, b); 263 + __constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 1); 257 264 break; 258 265 case 8: 259 - __constant_copy_from_user_asm(res, to, from, tmp, 8, l, l,); 266 + __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 0); 260 267 break; 261 268 case 9: 262 - __constant_copy_from_user_asm(res, to, from, tmp, 9, l, l, b); 269 + __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 1); 263 270 break; 264 271 case 10: 265 - __constant_copy_from_user_asm(res, 
to, from, tmp, 10, l, l, w); 272 + __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 2); 266 273 break; 267 274 case 12: 268 - __constant_copy_from_user_asm(res, to, from, tmp, 12, l, l, l); 275 + __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 4); 269 276 break; 270 277 default: 271 278 /* we limit the inlined version to 3 moves */ ··· 356 363 return res; 357 364 } 358 365 359 - #define __copy_from_user(to, from, n) \ 360 - (__builtin_constant_p(n) ? \ 361 - __constant_copy_from_user(to, from, n) : \ 362 - __generic_copy_from_user(to, from, n)) 366 + static inline unsigned long 367 + raw_copy_from_user(void *to, const void __user *from, unsigned long n) 368 + { 369 + if (__builtin_constant_p(n)) 370 + return __constant_copy_from_user(to, from, n); 371 + return __generic_copy_from_user(to, from, n); 372 + } 363 373 364 - #define __copy_to_user(to, from, n) \ 365 - (__builtin_constant_p(n) ? \ 366 - __constant_copy_to_user(to, from, n) : \ 367 - __generic_copy_to_user(to, from, n)) 368 - 369 - #define __copy_to_user_inatomic __copy_to_user 370 - #define __copy_from_user_inatomic __copy_from_user 371 - 372 - #define copy_from_user(to, from, n) __copy_from_user(to, from, n) 373 - #define copy_to_user(to, from, n) __copy_to_user(to, from, n) 374 + static inline unsigned long 375 + raw_copy_to_user(void __user *to, const void *from, unsigned long n) 376 + { 377 + if (__builtin_constant_p(n)) 378 + return __constant_copy_to_user(to, from, n); 379 + return __generic_copy_to_user(to, from, n); 380 + } 381 + #define INLINE_COPY_FROM_USER 382 + #define INLINE_COPY_TO_USER 374 383 375 384 #define user_addr_max() \ 376 - (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) 385 + (uaccess_kernel() ? ~0UL : TASK_SIZE) 377 386 378 387 extern long strncpy_from_user(char *dst, const char __user *src, long count); 379 388 extern __must_check long strlen_user(const char __user *str);
+14 -29
arch/m68k/include/asm/uaccess_no.h
··· 4 4 /* 5 5 * User space memory access functions 6 6 */ 7 - #include <linux/sched.h> 8 7 #include <linux/mm.h> 9 8 #include <linux/string.h> 10 9 11 10 #include <asm/segment.h> 12 - 13 - #define VERIFY_READ 0 14 - #define VERIFY_WRITE 1 15 11 16 12 #define access_ok(type,addr,size) _access_ok((unsigned long)(addr),(size)) 17 13 ··· 21 25 { 22 26 return 1; 23 27 } 24 - 25 - /* 26 - * The exception table consists of pairs of addresses: the first is the 27 - * address of an instruction that is allowed to fault, and the second is 28 - * the address at which the program should continue. No registers are 29 - * modified, so it is entirely up to the continuation code to figure out 30 - * what to do. 31 - * 32 - * All the routines below use bits of fixup code that are out of line 33 - * with the main instruction path. This means when everything is well, 34 - * we don't even have to jump over them. Further, they do not intrude 35 - * on our cache or tlb entries. 36 - */ 37 - 38 - struct exception_table_entry 39 - { 40 - unsigned long insn, fixup; 41 - }; 42 - 43 28 44 29 /* 45 30 * These are the main single-value transfer routines. 
They automatically ··· 101 124 : "=d" (x) \ 102 125 : "m" (*__ptr(ptr))) 103 126 104 - #define copy_from_user(to, from, n) (memcpy(to, from, n), 0) 105 - #define copy_to_user(to, from, n) (memcpy(to, from, n), 0) 127 + static inline unsigned long 128 + raw_copy_from_user(void *to, const void __user *from, unsigned long n) 129 + { 130 + memcpy(to, (__force const void *)from, n); 131 + return 0; 132 + } 106 133 107 - #define __copy_from_user(to, from, n) copy_from_user(to, from, n) 108 - #define __copy_to_user(to, from, n) copy_to_user(to, from, n) 109 - #define __copy_to_user_inatomic __copy_to_user 110 - #define __copy_from_user_inatomic __copy_from_user 134 + static inline unsigned long 135 + raw_copy_to_user(void __user *to, const void *from, unsigned long n) 136 + { 137 + memcpy((__force void *)to, from, n); 138 + return 0; 139 + } 140 + #define INLINE_COPY_FROM_USER 141 + #define INLINE_COPY_TO_USER 111 142 112 143 /* 113 144 * Copy a null terminated string from userspace.
+1 -1
arch/m68k/kernel/signal.c
··· 88 88 return frame_size_change[f]; 89 89 } 90 90 91 - int handle_kernel_fault(struct pt_regs *regs) 91 + int fixup_exception(struct pt_regs *regs) 92 92 { 93 93 const struct exception_table_entry *fixup; 94 94 struct pt_regs *tregs;
+7 -2
arch/m68k/kernel/traps.c
··· 1016 1016 /* traced a trapping instruction on a 68020/30, 1017 1017 * real exception will be executed afterwards. 1018 1018 */ 1019 - } else if (!handle_kernel_fault(&fp->ptregs)) 1020 - bad_super_trap(fp); 1019 + return; 1020 + } 1021 + #ifdef CONFIG_MMU 1022 + if (fixup_exception(&fp->ptregs)) 1023 + return; 1024 + #endif 1025 + bad_super_trap(fp); 1021 1026 return; 1022 1027 } 1023 1028
+3 -9
arch/m68k/lib/uaccess.c
··· 30 30 "6:\n" 31 31 " .section .fixup,\"ax\"\n" 32 32 " .even\n" 33 - "10: move.l %0,%3\n" 34 - "7: clr.l (%2)+\n" 35 - " subq.l #1,%3\n" 36 - " jne 7b\n" 37 - " lsl.l #2,%0\n" 33 + "10: lsl.l #2,%0\n" 38 34 " btst #1,%5\n" 39 35 " jeq 8f\n" 40 - "30: clr.w (%2)+\n" 41 - " addq.l #2,%0\n" 36 + "30: addq.l #2,%0\n" 42 37 "8: btst #0,%5\n" 43 38 " jeq 6b\n" 44 - "50: clr.b (%2)+\n" 45 - " addq.l #1,%0\n" 39 + "50: addq.l #1,%0\n" 46 40 " jra 6b\n" 47 41 " .previous\n" 48 42 "\n"
+1 -1
arch/m68k/mm/fault.c
··· 32 32 force_sig_info(siginfo.si_signo, 33 33 &siginfo, current); 34 34 } else { 35 - if (handle_kernel_fault(regs)) 35 + if (fixup_exception(regs)) 36 36 return -1; 37 37 38 38 //if (siginfo.si_signo == SIGBUS)
+1
arch/metag/include/asm/Kbuild
··· 8 8 generic-y += emergency-restart.h 9 9 generic-y += errno.h 10 10 generic-y += exec.h 11 + generic-y += extable.h 11 12 generic-y += fb.h 12 13 generic-y += fcntl.h 13 14 generic-y += futex.h
+4 -56
arch/metag/include/asm/uaccess.h
··· 4 4 /* 5 5 * User space memory access functions 6 6 */ 7 - #include <linux/sched.h> 8 - 9 - #define VERIFY_READ 0 10 - #define VERIFY_WRITE 1 11 7 12 8 /* 13 9 * The fs value determines whether argument validity checking should be ··· 24 28 25 29 #define segment_eq(a, b) ((a).seg == (b).seg) 26 30 27 - #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) 31 + #define __kernel_ok (uaccess_kernel()) 28 32 /* 29 33 * Explicitly allow NULL pointers here. Parts of the kernel such 30 34 * as readv/writev use access_ok to validate pointers, but want ··· 47 51 #define access_ok(type, addr, size) __access_ok((unsigned long)(addr), \ 48 52 (unsigned long)(size)) 49 53 50 - static inline int verify_area(int type, const void *addr, unsigned long size) 51 - { 52 - return access_ok(type, addr, size) ? 0 : -EFAULT; 53 - } 54 - 55 - /* 56 - * The exception table consists of pairs of addresses: the first is the 57 - * address of an instruction that is allowed to fault, and the second is 58 - * the address at which the program should continue. No registers are 59 - * modified, so it is entirely up to the continuation code to figure out 60 - * what to do. 61 - * 62 - * All the routines below use bits of fixup code that are out of line 63 - * with the main instruction path. This means when everything is well, 64 - * we don't even have to jump over them. Further, they do not intrude 65 - * on our cache or tlb entries. 66 - */ 67 - struct exception_table_entry { 68 - unsigned long insn, fixup; 69 - }; 70 - 71 - extern int fixup_exception(struct pt_regs *regs); 54 + #include <asm/extable.h> 72 55 73 56 /* 74 57 * These are the main single-value transfer routines. 
They automatically ··· 174 199 175 200 extern unsigned long raw_copy_from_user(void *to, const void __user *from, 176 201 unsigned long n); 177 - 178 - static inline unsigned long 179 - copy_from_user(void *to, const void __user *from, unsigned long n) 180 - { 181 - unsigned long res = n; 182 - if (likely(access_ok(VERIFY_READ, from, n))) 183 - res = raw_copy_from_user(to, from, n); 184 - if (unlikely(res)) 185 - memset(to + (n - res), 0, res); 186 - return res; 187 - } 188 - 189 - #define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n) 190 - #define __copy_from_user_inatomic __copy_from_user 191 - 192 - extern unsigned long __must_check __copy_user(void __user *to, 193 - const void *from, 194 - unsigned long n); 195 - 196 - static inline unsigned long copy_to_user(void __user *to, const void *from, 197 - unsigned long n) 198 - { 199 - if (access_ok(VERIFY_WRITE, to, n)) 200 - return __copy_user(to, from, n); 201 - return n; 202 - } 203 - 204 - #define __copy_to_user(to, from, n) __copy_user(to, from, n) 205 - #define __copy_to_user_inatomic __copy_to_user 202 + extern unsigned long raw_copy_to_user(void __user *to, const void *from, 203 + unsigned long n); 206 204 207 205 /* 208 206 * Zero Userspace
+3 -3
arch/metag/lib/usercopy.c
··· 548 548 "SUB %1, %1, D0Ar2\n" \ 549 549 "SUB %3, %3, D1Ar1\n") 550 550 551 - unsigned long __copy_user(void __user *pdst, const void *psrc, 552 - unsigned long n) 551 + unsigned long raw_copy_to_user(void __user *pdst, const void *psrc, 552 + unsigned long n) 553 553 { 554 554 register char __user *dst asm ("A0.2") = pdst; 555 555 register const char *src asm ("A1.2") = psrc; ··· 654 654 */ 655 655 return retn; 656 656 } 657 - EXPORT_SYMBOL(__copy_user); 657 + EXPORT_SYMBOL(raw_copy_to_user); 658 658 659 659 #define __asm_copy_from_user_1(to, from, ret) \ 660 660 __asm_copy_user_cont(to, from, ret, \
+1
arch/microblaze/include/asm/Kbuild
··· 3 3 generic-y += clkdev.h 4 4 generic-y += device.h 5 5 generic-y += exec.h 6 + generic-y += extable.h 6 7 generic-y += irq_work.h 7 8 generic-y += mcs_spinlock.h 8 9 generic-y += mm-arch-hooks.h
+9 -55
arch/microblaze/include/asm/uaccess.h
··· 11 11 #ifndef _ASM_MICROBLAZE_UACCESS_H 12 12 #define _ASM_MICROBLAZE_UACCESS_H 13 13 14 - #ifdef __KERNEL__ 15 - #ifndef __ASSEMBLY__ 16 - 17 14 #include <linux/kernel.h> 18 - #include <linux/errno.h> 19 - #include <linux/sched.h> /* RLIMIT_FSIZE */ 20 15 #include <linux/mm.h> 21 16 22 17 #include <asm/mmu.h> 23 18 #include <asm/page.h> 24 19 #include <asm/pgtable.h> 20 + #include <asm/extable.h> 25 21 #include <linux/string.h> 26 - 27 - #define VERIFY_READ 0 28 - #define VERIFY_WRITE 1 29 22 30 23 /* 31 24 * On Microblaze the fs value is actually the top of the corresponding ··· 47 54 # define set_fs(val) (current_thread_info()->addr_limit = (val)) 48 55 49 56 # define segment_eq(a, b) ((a).seg == (b).seg) 50 - 51 - /* 52 - * The exception table consists of pairs of addresses: the first is the 53 - * address of an instruction that is allowed to fault, and the second is 54 - * the address at which the program should continue. No registers are 55 - * modified, so it is entirely up to the continuation code to figure out 56 - * what to do. 57 - * 58 - * All the routines below use bits of fixup code that are out of line 59 - * with the main instruction path. This means when everything is well, 60 - * we don't even have to jump over them. Further, they do not intrude 61 - * on our cache or tlb entries. 
62 - */ 63 - struct exception_table_entry { 64 - unsigned long insn, fixup; 65 - }; 66 57 67 58 #ifndef CONFIG_MMU 68 59 ··· 336 359 __gu_err; \ 337 360 }) 338 361 339 - 340 - /* copy_to_from_user */ 341 - #define __copy_from_user(to, from, n) \ 342 - __copy_tofrom_user((__force void __user *)(to), \ 343 - (void __user *)(from), (n)) 344 - #define __copy_from_user_inatomic(to, from, n) \ 345 - __copy_from_user((to), (from), (n)) 346 - 347 - static inline long copy_from_user(void *to, 348 - const void __user *from, unsigned long n) 362 + static inline unsigned long 363 + raw_copy_from_user(void *to, const void __user *from, unsigned long n) 349 364 { 350 - unsigned long res = n; 351 - might_fault(); 352 - if (likely(access_ok(VERIFY_READ, from, n))) 353 - res = __copy_from_user(to, from, n); 354 - if (unlikely(res)) 355 - memset(to + (n - res), 0, res); 356 - return res; 365 + return __copy_tofrom_user((__force void __user *)to, from, n); 357 366 } 358 367 359 - #define __copy_to_user(to, from, n) \ 360 - __copy_tofrom_user((void __user *)(to), \ 361 - (__force const void __user *)(from), (n)) 362 - #define __copy_to_user_inatomic(to, from, n) __copy_to_user((to), (from), (n)) 363 - 364 - static inline long copy_to_user(void __user *to, 365 - const void *from, unsigned long n) 368 + static inline unsigned long 369 + raw_copy_to_user(void __user *to, const void *from, unsigned long n) 366 370 { 367 - might_fault(); 368 - if (access_ok(VERIFY_WRITE, to, n)) 369 - return __copy_to_user(to, from, n); 370 - return n; 371 + return __copy_tofrom_user(to, (__force const void __user *)from, n); 371 372 } 373 + #define INLINE_COPY_FROM_USER 374 + #define INLINE_COPY_TO_USER 372 375 373 376 /* 374 377 * Copy a null terminated string from userspace. ··· 378 421 return 0; 379 422 return __strnlen_user(src, n); 380 423 } 381 - 382 - #endif /* __ASSEMBLY__ */ 383 - #endif /* __KERNEL__ */ 384 424 385 425 #endif /* _ASM_MICROBLAZE_UACCESS_H */
-1
arch/mips/Kconfig
··· 68 68 select HANDLE_DOMAIN_IRQ 69 69 select HAVE_EXIT_THREAD 70 70 select HAVE_REGS_AND_STACK_ACCESS_API 71 - select HAVE_ARCH_HARDENED_USERCOPY 72 71 73 72 menu "Machine selection" 74 73
+1 -30
arch/mips/cavium-octeon/octeon-memcpy.S
··· 140 140 .set noat 141 141 142 142 /* 143 - * t7 is used as a flag to note inatomic mode. 144 - */ 145 - LEAF(__copy_user_inatomic) 146 - EXPORT_SYMBOL(__copy_user_inatomic) 147 - b __copy_user_common 148 - li t7, 1 149 - END(__copy_user_inatomic) 150 - 151 - /* 152 143 * A combined memcpy/__copy_user 153 144 * __copy_user sets len to 0 for success; else to an upper bound of 154 145 * the number of uncopied bytes. ··· 152 161 __memcpy: 153 162 FEXPORT(__copy_user) 154 163 EXPORT_SYMBOL(__copy_user) 155 - li t7, 0 /* not inatomic */ 156 - __copy_user_common: 157 164 /* 158 165 * Note: dst & src may be unaligned, len may be 0 159 166 * Temps ··· 403 414 LOAD t0, TI_TASK($28) 404 415 LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address 405 416 SUB len, AT, t0 # len number of uncopied bytes 406 - bnez t7, 2f /* Skip the zeroing out part if inatomic */ 407 - /* 408 - * Here's where we rely on src and dst being incremented in tandem, 409 - * See (3) above. 410 - * dst += (fault addr - src) to put dst at first byte to clear 411 - */ 412 - ADD dst, t0 # compute start address in a1 413 - SUB dst, src 414 - /* 415 - * Clear len bytes starting at dst. Can't call __bzero because it 416 - * might modify len. An inefficient loop for these rare times... 417 - */ 418 - beqz len, done 419 - SUB src, len, 1 420 - 1: sb zero, 0(dst) 421 - ADD dst, dst, 1 422 - bnez src, 1b 423 - SUB src, src, 1 424 - 2: jr ra 417 + jr ra 425 418 nop 426 419 427 420
+2 -2
arch/mips/include/asm/checksum.h
··· 50 50 __wsum sum, int *err_ptr) 51 51 { 52 52 might_fault(); 53 - if (segment_eq(get_fs(), get_ds())) 53 + if (uaccess_kernel()) 54 54 return __csum_partial_copy_kernel((__force void *)src, dst, 55 55 len, sum, err_ptr); 56 56 else ··· 82 82 { 83 83 might_fault(); 84 84 if (access_ok(VERIFY_WRITE, dst, len)) { 85 - if (segment_eq(get_fs(), get_ds())) 85 + if (uaccess_kernel()) 86 86 return __csum_partial_copy_kernel(src, 87 87 (__force void *)dst, 88 88 len, sum, err_ptr);
+2 -2
arch/mips/include/asm/r4kcache.h
··· 20 20 #include <asm/cpu-features.h> 21 21 #include <asm/cpu-type.h> 22 22 #include <asm/mipsmtregs.h> 23 - #include <linux/uaccess.h> /* for segment_eq() */ 23 + #include <linux/uaccess.h> /* for uaccess_kernel() */ 24 24 25 25 extern void (*r4k_blast_dcache)(void); 26 26 extern void (*r4k_blast_icache)(void); ··· 714 714 \ 715 715 __##pfx##flush_prologue \ 716 716 \ 717 - if (segment_eq(get_fs(), USER_DS)) { \ 717 + if (!uaccess_kernel()) { \ 718 718 while (1) { \ 719 719 protected_cachee_op(hitop, addr); \ 720 720 if (addr == aend) \
+73 -402
arch/mips/include/asm/uaccess.h
··· 12 12 #define _ASM_UACCESS_H 13 13 14 14 #include <linux/kernel.h> 15 - #include <linux/errno.h> 16 - #include <linux/thread_info.h> 17 15 #include <linux/string.h> 18 16 #include <asm/asm-eva.h> 19 17 #include <asm/extable.h> ··· 69 71 #define USER_DS ((mm_segment_t) { __UA_LIMIT }) 70 72 #endif 71 73 72 - #define VERIFY_READ 0 73 - #define VERIFY_WRITE 1 74 - 75 74 #define get_ds() (KERNEL_DS) 76 75 #define get_fs() (current_thread_info()->addr_limit) 77 76 #define set_fs(x) (current_thread_info()->addr_limit = (x)) ··· 88 93 if (!IS_ENABLED(CONFIG_EVA)) 89 94 return false; 90 95 91 - return segment_eq(get_fs(), get_ds()); 96 + return uaccess_kernel(); 92 97 } 93 98 94 99 /* ··· 128 133 * this function, memory access functions may still return -EFAULT. 129 134 */ 130 135 131 - #define __access_mask get_fs().seg 132 - 133 - #define __access_ok(addr, size, mask) \ 134 - ({ \ 135 - unsigned long __addr = (unsigned long) (addr); \ 136 - unsigned long __size = size; \ 137 - unsigned long __mask = mask; \ 138 - unsigned long __ok; \ 139 - \ 140 - __chk_user_ptr(addr); \ 141 - __ok = (signed long)(__mask & (__addr | (__addr + __size) | \ 142 - __ua_size(__size))); \ 143 - __ok == 0; \ 144 - }) 136 + static inline int __access_ok(const void __user *p, unsigned long size) 137 + { 138 + unsigned long addr = (unsigned long)p; 139 + return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0; 140 + } 145 141 146 142 #define access_ok(type, addr, size) \ 147 - likely(__access_ok((addr), (size), __access_mask)) 143 + likely(__access_ok((addr), (size))) 148 144 149 145 /* 150 146 * put_user: - Write a simple value into user space. 
··· 797 811 798 812 extern size_t __copy_user(void *__to, const void *__from, size_t __n); 799 813 800 - #ifndef CONFIG_EVA 801 - #define __invoke_copy_to_user(to, from, n) \ 814 + #define __invoke_copy_from(func, to, from, n) \ 815 + ({ \ 816 + register void *__cu_to_r __asm__("$4"); \ 817 + register const void __user *__cu_from_r __asm__("$5"); \ 818 + register long __cu_len_r __asm__("$6"); \ 819 + \ 820 + __cu_to_r = (to); \ 821 + __cu_from_r = (from); \ 822 + __cu_len_r = (n); \ 823 + __asm__ __volatile__( \ 824 + ".set\tnoreorder\n\t" \ 825 + __MODULE_JAL(func) \ 826 + ".set\tnoat\n\t" \ 827 + __UA_ADDU "\t$1, %1, %2\n\t" \ 828 + ".set\tat\n\t" \ 829 + ".set\treorder" \ 830 + : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ 831 + : \ 832 + : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ 833 + DADDI_SCRATCH, "memory"); \ 834 + __cu_len_r; \ 835 + }) 836 + 837 + #define __invoke_copy_to(func, to, from, n) \ 802 838 ({ \ 803 839 register void __user *__cu_to_r __asm__("$4"); \ 804 840 register const void *__cu_from_r __asm__("$5"); \ ··· 830 822 __cu_from_r = (from); \ 831 823 __cu_len_r = (n); \ 832 824 __asm__ __volatile__( \ 833 - __MODULE_JAL(__copy_user) \ 834 - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ 835 - : \ 836 - : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ 837 - DADDI_SCRATCH, "memory"); \ 838 - __cu_len_r; \ 839 - }) 840 - 841 - #define __invoke_copy_to_kernel(to, from, n) \ 842 - __invoke_copy_to_user(to, from, n) 843 - 844 - #endif 845 - 846 - /* 847 - * __copy_to_user: - Copy a block of data into user space, with less checking. 848 - * @to: Destination address, in user space. 849 - * @from: Source address, in kernel space. 850 - * @n: Number of bytes to copy. 851 - * 852 - * Context: User context only. This function may sleep if pagefaults are 853 - * enabled. 854 - * 855 - * Copy data from kernel space to user space. 
Caller must check 856 - * the specified block with access_ok() before calling this function. 857 - * 858 - * Returns number of bytes that could not be copied. 859 - * On success, this will be zero. 860 - */ 861 - #define __copy_to_user(to, from, n) \ 862 - ({ \ 863 - void __user *__cu_to; \ 864 - const void *__cu_from; \ 865 - long __cu_len; \ 866 - \ 867 - __cu_to = (to); \ 868 - __cu_from = (from); \ 869 - __cu_len = (n); \ 870 - \ 871 - check_object_size(__cu_from, __cu_len, true); \ 872 - might_fault(); \ 873 - \ 874 - if (eva_kernel_access()) \ 875 - __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \ 876 - __cu_len); \ 877 - else \ 878 - __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \ 879 - __cu_len); \ 880 - __cu_len; \ 881 - }) 882 - 883 - extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); 884 - 885 - #define __copy_to_user_inatomic(to, from, n) \ 886 - ({ \ 887 - void __user *__cu_to; \ 888 - const void *__cu_from; \ 889 - long __cu_len; \ 890 - \ 891 - __cu_to = (to); \ 892 - __cu_from = (from); \ 893 - __cu_len = (n); \ 894 - \ 895 - check_object_size(__cu_from, __cu_len, true); \ 896 - \ 897 - if (eva_kernel_access()) \ 898 - __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \ 899 - __cu_len); \ 900 - else \ 901 - __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \ 902 - __cu_len); \ 903 - __cu_len; \ 904 - }) 905 - 906 - #define __copy_from_user_inatomic(to, from, n) \ 907 - ({ \ 908 - void *__cu_to; \ 909 - const void __user *__cu_from; \ 910 - long __cu_len; \ 911 - \ 912 - __cu_to = (to); \ 913 - __cu_from = (from); \ 914 - __cu_len = (n); \ 915 - \ 916 - check_object_size(__cu_to, __cu_len, false); \ 917 - \ 918 - if (eva_kernel_access()) \ 919 - __cu_len = __invoke_copy_from_kernel_inatomic(__cu_to, \ 920 - __cu_from,\ 921 - __cu_len);\ 922 - else \ 923 - __cu_len = __invoke_copy_from_user_inatomic(__cu_to, \ 924 - __cu_from, \ 925 - __cu_len); \ 926 - __cu_len; \ 927 - }) 928 - 929 - /* 930 - * 
copy_to_user: - Copy a block of data into user space. 931 - * @to: Destination address, in user space. 932 - * @from: Source address, in kernel space. 933 - * @n: Number of bytes to copy. 934 - * 935 - * Context: User context only. This function may sleep if pagefaults are 936 - * enabled. 937 - * 938 - * Copy data from kernel space to user space. 939 - * 940 - * Returns number of bytes that could not be copied. 941 - * On success, this will be zero. 942 - */ 943 - #define copy_to_user(to, from, n) \ 944 - ({ \ 945 - void __user *__cu_to; \ 946 - const void *__cu_from; \ 947 - long __cu_len; \ 948 - \ 949 - __cu_to = (to); \ 950 - __cu_from = (from); \ 951 - __cu_len = (n); \ 952 - \ 953 - check_object_size(__cu_from, __cu_len, true); \ 954 - \ 955 - if (eva_kernel_access()) { \ 956 - __cu_len = __invoke_copy_to_kernel(__cu_to, \ 957 - __cu_from, \ 958 - __cu_len); \ 959 - } else { \ 960 - if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \ 961 - might_fault(); \ 962 - __cu_len = __invoke_copy_to_user(__cu_to, \ 963 - __cu_from, \ 964 - __cu_len); \ 965 - } \ 966 - } \ 967 - __cu_len; \ 968 - }) 969 - 970 - #ifndef CONFIG_EVA 971 - 972 - #define __invoke_copy_from_user(to, from, n) \ 973 - ({ \ 974 - register void *__cu_to_r __asm__("$4"); \ 975 - register const void __user *__cu_from_r __asm__("$5"); \ 976 - register long __cu_len_r __asm__("$6"); \ 977 - \ 978 - __cu_to_r = (to); \ 979 - __cu_from_r = (from); \ 980 - __cu_len_r = (n); \ 981 - __asm__ __volatile__( \ 982 - ".set\tnoreorder\n\t" \ 983 - __MODULE_JAL(__copy_user) \ 984 - ".set\tnoat\n\t" \ 985 - __UA_ADDU "\t$1, %1, %2\n\t" \ 986 - ".set\tat\n\t" \ 987 - ".set\treorder" \ 825 + __MODULE_JAL(func) \ 988 826 : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ 989 827 : \ 990 828 : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ ··· 839 985 }) 840 986 841 987 #define __invoke_copy_from_kernel(to, from, n) \ 842 - __invoke_copy_from_user(to, from, n) 988 + 
__invoke_copy_from(__copy_user, to, from, n) 843 989 844 - /* For userland <-> userland operations */ 845 - #define ___invoke_copy_in_user(to, from, n) \ 846 - __invoke_copy_from_user(to, from, n) 990 + #define __invoke_copy_to_kernel(to, from, n) \ 991 + __invoke_copy_to(__copy_user, to, from, n) 847 992 848 - /* For kernel <-> kernel operations */ 849 993 #define ___invoke_copy_in_kernel(to, from, n) \ 850 - __invoke_copy_from_user(to, from, n) 994 + __invoke_copy_from(__copy_user, to, from, n) 851 995 852 - #define __invoke_copy_from_user_inatomic(to, from, n) \ 853 - ({ \ 854 - register void *__cu_to_r __asm__("$4"); \ 855 - register const void __user *__cu_from_r __asm__("$5"); \ 856 - register long __cu_len_r __asm__("$6"); \ 857 - \ 858 - __cu_to_r = (to); \ 859 - __cu_from_r = (from); \ 860 - __cu_len_r = (n); \ 861 - __asm__ __volatile__( \ 862 - ".set\tnoreorder\n\t" \ 863 - __MODULE_JAL(__copy_user_inatomic) \ 864 - ".set\tnoat\n\t" \ 865 - __UA_ADDU "\t$1, %1, %2\n\t" \ 866 - ".set\tat\n\t" \ 867 - ".set\treorder" \ 868 - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ 869 - : \ 870 - : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ 871 - DADDI_SCRATCH, "memory"); \ 872 - __cu_len_r; \ 873 - }) 996 + #ifndef CONFIG_EVA 997 + #define __invoke_copy_from_user(to, from, n) \ 998 + __invoke_copy_from(__copy_user, to, from, n) 874 999 875 - #define __invoke_copy_from_kernel_inatomic(to, from, n) \ 876 - __invoke_copy_from_user_inatomic(to, from, n) \ 1000 + #define __invoke_copy_to_user(to, from, n) \ 1001 + __invoke_copy_to(__copy_user, to, from, n) 1002 + 1003 + #define ___invoke_copy_in_user(to, from, n) \ 1004 + __invoke_copy_from(__copy_user, to, from, n) 877 1005 878 1006 #else 879 1007 880 1008 /* EVA specific functions */ 881 1009 882 - extern size_t __copy_user_inatomic_eva(void *__to, const void *__from, 883 - size_t __n); 884 1010 extern size_t __copy_from_user_eva(void *__to, const void *__from, 885 1011 size_t __n); 
886 1012 extern size_t __copy_to_user_eva(void *__to, const void *__from, 887 1013 size_t __n); 888 1014 extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); 889 1015 890 - #define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr) \ 891 - ({ \ 892 - register void *__cu_to_r __asm__("$4"); \ 893 - register const void __user *__cu_from_r __asm__("$5"); \ 894 - register long __cu_len_r __asm__("$6"); \ 895 - \ 896 - __cu_to_r = (to); \ 897 - __cu_from_r = (from); \ 898 - __cu_len_r = (n); \ 899 - __asm__ __volatile__( \ 900 - ".set\tnoreorder\n\t" \ 901 - __MODULE_JAL(func_ptr) \ 902 - ".set\tnoat\n\t" \ 903 - __UA_ADDU "\t$1, %1, %2\n\t" \ 904 - ".set\tat\n\t" \ 905 - ".set\treorder" \ 906 - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ 907 - : \ 908 - : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ 909 - DADDI_SCRATCH, "memory"); \ 910 - __cu_len_r; \ 911 - }) 912 - 913 - #define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr) \ 914 - ({ \ 915 - register void *__cu_to_r __asm__("$4"); \ 916 - register const void __user *__cu_from_r __asm__("$5"); \ 917 - register long __cu_len_r __asm__("$6"); \ 918 - \ 919 - __cu_to_r = (to); \ 920 - __cu_from_r = (from); \ 921 - __cu_len_r = (n); \ 922 - __asm__ __volatile__( \ 923 - __MODULE_JAL(func_ptr) \ 924 - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ 925 - : \ 926 - : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ 927 - DADDI_SCRATCH, "memory"); \ 928 - __cu_len_r; \ 929 - }) 930 - 931 1016 /* 932 1017 * Source or destination address is in userland. 
We need to go through 933 1018 * the TLB 934 1019 */ 935 1020 #define __invoke_copy_from_user(to, from, n) \ 936 - __invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva) 937 - 938 - #define __invoke_copy_from_user_inatomic(to, from, n) \ 939 - __invoke_copy_from_user_eva_generic(to, from, n, \ 940 - __copy_user_inatomic_eva) 1021 + __invoke_copy_from(__copy_from_user_eva, to, from, n) 941 1022 942 1023 #define __invoke_copy_to_user(to, from, n) \ 943 - __invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva) 1024 + __invoke_copy_to(__copy_to_user_eva, to, from, n) 944 1025 945 1026 #define ___invoke_copy_in_user(to, from, n) \ 946 - __invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva) 947 - 948 - /* 949 - * Source or destination address in the kernel. We are not going through 950 - * the TLB 951 - */ 952 - #define __invoke_copy_from_kernel(to, from, n) \ 953 - __invoke_copy_from_user_eva_generic(to, from, n, __copy_user) 954 - 955 - #define __invoke_copy_from_kernel_inatomic(to, from, n) \ 956 - __invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic) 957 - 958 - #define __invoke_copy_to_kernel(to, from, n) \ 959 - __invoke_copy_to_user_eva_generic(to, from, n, __copy_user) 960 - 961 - #define ___invoke_copy_in_kernel(to, from, n) \ 962 - __invoke_copy_from_user_eva_generic(to, from, n, __copy_user) 1027 + __invoke_copy_from(__copy_in_user_eva, to, from, n) 963 1028 964 1029 #endif /* CONFIG_EVA */ 965 1030 966 - /* 967 - * __copy_from_user: - Copy a block of data from user space, with less checking. 968 - * @to: Destination address, in kernel space. 969 - * @from: Source address, in user space. 970 - * @n: Number of bytes to copy. 971 - * 972 - * Context: User context only. This function may sleep if pagefaults are 973 - * enabled. 974 - * 975 - * Copy data from user space to kernel space. Caller must check 976 - * the specified block with access_ok() before calling this function. 
977 - * 978 - * Returns number of bytes that could not be copied. 979 - * On success, this will be zero. 980 - * 981 - * If some data could not be copied, this function will pad the copied 982 - * data to the requested size using zero bytes. 983 - */ 984 - #define __copy_from_user(to, from, n) \ 985 - ({ \ 986 - void *__cu_to; \ 987 - const void __user *__cu_from; \ 988 - long __cu_len; \ 989 - \ 990 - __cu_to = (to); \ 991 - __cu_from = (from); \ 992 - __cu_len = (n); \ 993 - \ 994 - check_object_size(__cu_to, __cu_len, false); \ 995 - \ 996 - if (eva_kernel_access()) { \ 997 - __cu_len = __invoke_copy_from_kernel(__cu_to, \ 998 - __cu_from, \ 999 - __cu_len); \ 1000 - } else { \ 1001 - might_fault(); \ 1002 - __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ 1003 - __cu_len); \ 1004 - } \ 1005 - __cu_len; \ 1006 - }) 1031 + static inline unsigned long 1032 + raw_copy_to_user(void __user *to, const void *from, unsigned long n) 1033 + { 1034 + if (eva_kernel_access()) 1035 + return __invoke_copy_to_kernel(to, from, n); 1036 + else 1037 + return __invoke_copy_to_user(to, from, n); 1038 + } 1007 1039 1008 - /* 1009 - * copy_from_user: - Copy a block of data from user space. 1010 - * @to: Destination address, in kernel space. 1011 - * @from: Source address, in user space. 1012 - * @n: Number of bytes to copy. 1013 - * 1014 - * Context: User context only. This function may sleep if pagefaults are 1015 - * enabled. 1016 - * 1017 - * Copy data from user space to kernel space. 1018 - * 1019 - * Returns number of bytes that could not be copied. 1020 - * On success, this will be zero. 1021 - * 1022 - * If some data could not be copied, this function will pad the copied 1023 - * data to the requested size using zero bytes. 
1024 - */ 1025 - #define copy_from_user(to, from, n) \ 1026 - ({ \ 1027 - void *__cu_to; \ 1028 - const void __user *__cu_from; \ 1029 - long __cu_len; \ 1030 - \ 1031 - __cu_to = (to); \ 1032 - __cu_from = (from); \ 1033 - __cu_len = (n); \ 1034 - \ 1035 - check_object_size(__cu_to, __cu_len, false); \ 1036 - \ 1037 - if (eva_kernel_access()) { \ 1038 - __cu_len = __invoke_copy_from_kernel(__cu_to, \ 1039 - __cu_from, \ 1040 - __cu_len); \ 1041 - } else { \ 1042 - if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \ 1043 - might_fault(); \ 1044 - __cu_len = __invoke_copy_from_user(__cu_to, \ 1045 - __cu_from, \ 1046 - __cu_len); \ 1047 - } else { \ 1048 - memset(__cu_to, 0, __cu_len); \ 1049 - } \ 1050 - } \ 1051 - __cu_len; \ 1052 - }) 1040 + static inline unsigned long 1041 + raw_copy_from_user(void *to, const void __user *from, unsigned long n) 1042 + { 1043 + if (eva_kernel_access()) 1044 + return __invoke_copy_from_kernel(to, from, n); 1045 + else 1046 + return __invoke_copy_from_user(to, from, n); 1047 + } 1053 1048 1054 - #define __copy_in_user(to, from, n) \ 1055 - ({ \ 1056 - void __user *__cu_to; \ 1057 - const void __user *__cu_from; \ 1058 - long __cu_len; \ 1059 - \ 1060 - __cu_to = (to); \ 1061 - __cu_from = (from); \ 1062 - __cu_len = (n); \ 1063 - if (eva_kernel_access()) { \ 1064 - __cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \ 1065 - __cu_len); \ 1066 - } else { \ 1067 - might_fault(); \ 1068 - __cu_len = ___invoke_copy_in_user(__cu_to, __cu_from, \ 1069 - __cu_len); \ 1070 - } \ 1071 - __cu_len; \ 1072 - }) 1049 + #define INLINE_COPY_FROM_USER 1050 + #define INLINE_COPY_TO_USER 1073 1051 1074 - #define copy_in_user(to, from, n) \ 1075 - ({ \ 1076 - void __user *__cu_to; \ 1077 - const void __user *__cu_from; \ 1078 - long __cu_len; \ 1079 - \ 1080 - __cu_to = (to); \ 1081 - __cu_from = (from); \ 1082 - __cu_len = (n); \ 1083 - if (eva_kernel_access()) { \ 1084 - __cu_len = ___invoke_copy_in_kernel(__cu_to,__cu_from, \ 1085 - 
__cu_len); \ 1086 - } else { \ 1087 - if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&\ 1088 - access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {\ 1089 - might_fault(); \ 1090 - __cu_len = ___invoke_copy_in_user(__cu_to, \ 1091 - __cu_from, \ 1092 - __cu_len); \ 1093 - } \ 1094 - } \ 1095 - __cu_len; \ 1096 - }) 1052 + static inline unsigned long 1053 + raw_copy_in_user(void __user*to, const void __user *from, unsigned long n) 1054 + { 1055 + if (eva_kernel_access()) 1056 + return ___invoke_copy_in_kernel(to, from, n); 1057 + else 1058 + return ___invoke_copy_in_user(to, from, n); 1059 + } 1097 1060 1098 1061 extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size); 1099 1062 extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);
+12 -12
arch/mips/kernel/mips-r2-to-r6-emul.c
··· 1200 1200 case lwl_op: 1201 1201 rt = regs->regs[MIPSInst_RT(inst)]; 1202 1202 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1203 - if (!access_ok(VERIFY_READ, vaddr, 4)) { 1203 + if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) { 1204 1204 current->thread.cp0_baduaddr = vaddr; 1205 1205 err = SIGSEGV; 1206 1206 break; ··· 1273 1273 case lwr_op: 1274 1274 rt = regs->regs[MIPSInst_RT(inst)]; 1275 1275 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1276 - if (!access_ok(VERIFY_READ, vaddr, 4)) { 1276 + if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) { 1277 1277 current->thread.cp0_baduaddr = vaddr; 1278 1278 err = SIGSEGV; 1279 1279 break; ··· 1347 1347 case swl_op: 1348 1348 rt = regs->regs[MIPSInst_RT(inst)]; 1349 1349 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1350 - if (!access_ok(VERIFY_WRITE, vaddr, 4)) { 1350 + if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) { 1351 1351 current->thread.cp0_baduaddr = vaddr; 1352 1352 err = SIGSEGV; 1353 1353 break; ··· 1417 1417 case swr_op: 1418 1418 rt = regs->regs[MIPSInst_RT(inst)]; 1419 1419 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1420 - if (!access_ok(VERIFY_WRITE, vaddr, 4)) { 1420 + if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) { 1421 1421 current->thread.cp0_baduaddr = vaddr; 1422 1422 err = SIGSEGV; 1423 1423 break; ··· 1492 1492 1493 1493 rt = regs->regs[MIPSInst_RT(inst)]; 1494 1494 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1495 - if (!access_ok(VERIFY_READ, vaddr, 8)) { 1495 + if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) { 1496 1496 current->thread.cp0_baduaddr = vaddr; 1497 1497 err = SIGSEGV; 1498 1498 break; ··· 1611 1611 1612 1612 rt = regs->regs[MIPSInst_RT(inst)]; 1613 1613 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1614 - if (!access_ok(VERIFY_READ, vaddr, 8)) { 1614 + if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) { 1615 1615 current->thread.cp0_baduaddr = 
vaddr; 1616 1616 err = SIGSEGV; 1617 1617 break; ··· 1730 1730 1731 1731 rt = regs->regs[MIPSInst_RT(inst)]; 1732 1732 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1733 - if (!access_ok(VERIFY_WRITE, vaddr, 8)) { 1733 + if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) { 1734 1734 current->thread.cp0_baduaddr = vaddr; 1735 1735 err = SIGSEGV; 1736 1736 break; ··· 1848 1848 1849 1849 rt = regs->regs[MIPSInst_RT(inst)]; 1850 1850 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1851 - if (!access_ok(VERIFY_WRITE, vaddr, 8)) { 1851 + if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) { 1852 1852 current->thread.cp0_baduaddr = vaddr; 1853 1853 err = SIGSEGV; 1854 1854 break; ··· 1965 1965 err = SIGBUS; 1966 1966 break; 1967 1967 } 1968 - if (!access_ok(VERIFY_READ, vaddr, 4)) { 1968 + if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) { 1969 1969 current->thread.cp0_baduaddr = vaddr; 1970 1970 err = SIGBUS; 1971 1971 break; ··· 2021 2021 err = SIGBUS; 2022 2022 break; 2023 2023 } 2024 - if (!access_ok(VERIFY_WRITE, vaddr, 4)) { 2024 + if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) { 2025 2025 current->thread.cp0_baduaddr = vaddr; 2026 2026 err = SIGBUS; 2027 2027 break; ··· 2084 2084 err = SIGBUS; 2085 2085 break; 2086 2086 } 2087 - if (!access_ok(VERIFY_READ, vaddr, 8)) { 2087 + if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) { 2088 2088 current->thread.cp0_baduaddr = vaddr; 2089 2089 err = SIGBUS; 2090 2090 break; ··· 2145 2145 err = SIGBUS; 2146 2146 break; 2147 2147 } 2148 - if (!access_ok(VERIFY_WRITE, vaddr, 8)) { 2148 + if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) { 2149 2149 current->thread.cp0_baduaddr = vaddr; 2150 2150 err = SIGBUS; 2151 2151 break;
+1 -1
arch/mips/kernel/syscall.c
··· 98 98 if (unlikely(addr & 3)) 99 99 return -EINVAL; 100 100 101 - if (unlikely(!access_ok(VERIFY_WRITE, addr, 4))) 101 + if (unlikely(!access_ok(VERIFY_WRITE, (const void __user *)addr, 4))) 102 102 return -EINVAL; 103 103 104 104 if (cpu_has_llsc && R10000_LLSC_WAR) {
+5 -5
arch/mips/kernel/unaligned.c
··· 1026 1026 goto sigbus; 1027 1027 1028 1028 if (IS_ENABLED(CONFIG_EVA)) { 1029 - if (segment_eq(get_fs(), get_ds())) 1029 + if (uaccess_kernel()) 1030 1030 LoadHW(addr, value, res); 1031 1031 else 1032 1032 LoadHWE(addr, value, res); ··· 1045 1045 goto sigbus; 1046 1046 1047 1047 if (IS_ENABLED(CONFIG_EVA)) { 1048 - if (segment_eq(get_fs(), get_ds())) 1048 + if (uaccess_kernel()) 1049 1049 LoadW(addr, value, res); 1050 1050 else 1051 1051 LoadWE(addr, value, res); ··· 1064 1064 goto sigbus; 1065 1065 1066 1066 if (IS_ENABLED(CONFIG_EVA)) { 1067 - if (segment_eq(get_fs(), get_ds())) 1067 + if (uaccess_kernel()) 1068 1068 LoadHWU(addr, value, res); 1069 1069 else 1070 1070 LoadHWUE(addr, value, res); ··· 1132 1132 value = regs->regs[insn.i_format.rt]; 1133 1133 1134 1134 if (IS_ENABLED(CONFIG_EVA)) { 1135 - if (segment_eq(get_fs(), get_ds())) 1135 + if (uaccess_kernel()) 1136 1136 StoreHW(addr, value, res); 1137 1137 else 1138 1138 StoreHWE(addr, value, res); ··· 1152 1152 value = regs->regs[insn.i_format.rt]; 1153 1153 1154 1154 if (IS_ENABLED(CONFIG_EVA)) { 1155 - if (segment_eq(get_fs(), get_ds())) 1155 + if (uaccess_kernel()) 1156 1156 StoreW(addr, value, res); 1157 1157 else 1158 1158 StoreWE(addr, value, res);
-49
arch/mips/lib/memcpy.S
··· 562 562 LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address 563 563 nop 564 564 SUB len, AT, t0 # len number of uncopied bytes 565 - bnez t6, .Ldone\@ /* Skip the zeroing part if inatomic */ 566 - /* 567 - * Here's where we rely on src and dst being incremented in tandem, 568 - * See (3) above. 569 - * dst += (fault addr - src) to put dst at first byte to clear 570 - */ 571 - ADD dst, t0 # compute start address in a1 572 - SUB dst, src 573 - /* 574 - * Clear len bytes starting at dst. Can't call __bzero because it 575 - * might modify len. An inefficient loop for these rare times... 576 - */ 577 - .set reorder /* DADDI_WAR */ 578 - SUB src, len, 1 579 - beqz len, .Ldone\@ 580 - .set noreorder 581 - 1: sb zero, 0(dst) 582 - ADD dst, dst, 1 583 - #ifndef CONFIG_CPU_DADDI_WORKAROUNDS 584 - bnez src, 1b 585 - SUB src, src, 1 586 - #else 587 - .set push 588 - .set noat 589 - li v1, 1 590 - bnez src, 1b 591 - SUB src, src, v1 592 - .set pop 593 - #endif 594 565 jr ra 595 566 nop 596 - 597 567 598 568 #define SEXC(n) \ 599 569 .set reorder; /* DADDI_WAR */ \ ··· 643 673 END(__rmemcpy) 644 674 645 675 /* 646 - * t6 is used as a flag to note inatomic mode. 647 - */ 648 - LEAF(__copy_user_inatomic) 649 - EXPORT_SYMBOL(__copy_user_inatomic) 650 - b __copy_user_common 651 - li t6, 1 652 - END(__copy_user_inatomic) 653 - 654 - /* 655 676 * A combined memcpy/__copy_user 656 677 * __copy_user sets len to 0 for success; else to an upper bound of 657 678 * the number of uncopied bytes. 
··· 655 694 .L__memcpy: 656 695 FEXPORT(__copy_user) 657 696 EXPORT_SYMBOL(__copy_user) 658 - li t6, 0 /* not inatomic */ 659 - __copy_user_common: 660 697 /* Legacy Mode, user <-> user */ 661 698 __BUILD_COPY_USER LEGACY_MODE USEROP USEROP 662 699 ··· 667 708 * space 668 709 */ 669 710 670 - LEAF(__copy_user_inatomic_eva) 671 - EXPORT_SYMBOL(__copy_user_inatomic_eva) 672 - b __copy_from_user_common 673 - li t6, 1 674 - END(__copy_user_inatomic_eva) 675 - 676 711 /* 677 712 * __copy_from_user (EVA) 678 713 */ 679 714 680 715 LEAF(__copy_from_user_eva) 681 716 EXPORT_SYMBOL(__copy_from_user_eva) 682 - li t6, 0 /* not inatomic */ 683 - __copy_from_user_common: 684 717 __BUILD_COPY_USER EVA_MODE USEROP KERNELOP 685 718 END(__copy_from_user_eva) 686 719
+1 -1
arch/mips/oprofile/backtrace.c
··· 18 18 static inline int get_mem(unsigned long addr, unsigned long *result) 19 19 { 20 20 unsigned long *address = (unsigned long *) addr; 21 - if (!access_ok(VERIFY_READ, addr, sizeof(unsigned long))) 21 + if (!access_ok(VERIFY_READ, address, sizeof(unsigned long))) 22 22 return -1; 23 23 if (__copy_from_user_inatomic(result, address, sizeof(unsigned long))) 24 24 return -3;
+1
arch/mn10300/include/asm/Kbuild
··· 2 2 generic-y += barrier.h 3 3 generic-y += clkdev.h 4 4 generic-y += exec.h 5 + generic-y += extable.h 5 6 generic-y += irq_work.h 6 7 generic-y += mcs_spinlock.h 7 8 generic-y += mm-arch-hooks.h
+6 -181
arch/mn10300/include/asm/uaccess.h
··· 14 14 /* 15 15 * User space memory access functions 16 16 */ 17 - #include <linux/thread_info.h> 18 17 #include <linux/kernel.h> 19 18 #include <asm/page.h> 20 - #include <asm/errno.h> 21 - 22 - #define VERIFY_READ 0 23 - #define VERIFY_WRITE 1 24 19 25 20 /* 26 21 * The fs value determines whether argument validity checking should be ··· 66 71 #define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0) 67 72 #define __access_ok(addr, size) (__range_ok((addr), (size)) == 0) 68 73 69 - /* 70 - * The exception table consists of pairs of addresses: the first is the 71 - * address of an instruction that is allowed to fault, and the second is 72 - * the address at which the program should continue. No registers are 73 - * modified, so it is entirely up to the continuation code to figure out 74 - * what to do. 75 - * 76 - * All the routines below use bits of fixup code that are out of line 77 - * with the main instruction path. This means when everything is well, 78 - * we don't even have to jump over them. Further, they do not intrude 79 - * on our cache or tlb entries. 80 - */ 81 - 82 - struct exception_table_entry 83 - { 84 - unsigned long insn, fixup; 85 - }; 86 - 87 - /* Returns 0 if exception not found and fixup otherwise. 
*/ 88 - extern int fixup_exception(struct pt_regs *regs); 74 + #include <asm/extable.h> 89 75 90 76 #define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr))) 91 77 #define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr))) ··· 275 299 } \ 276 300 } while (0) 277 301 278 - #define __copy_user_zeroing(to, from, size) \ 279 - do { \ 280 - if (size) { \ 281 - void *__to = to; \ 282 - const void *__from = from; \ 283 - int w; \ 284 - asm volatile( \ 285 - "0: movbu (%0),%3;\n" \ 286 - "1: movbu %3,(%1);\n" \ 287 - " inc %0;\n" \ 288 - " inc %1;\n" \ 289 - " add -1,%2;\n" \ 290 - " bne 0b;\n" \ 291 - "2:\n" \ 292 - " .section .fixup,\"ax\"\n" \ 293 - "3:\n" \ 294 - " mov %2,%0\n" \ 295 - " clr %3\n" \ 296 - "4: movbu %3,(%1);\n" \ 297 - " inc %1;\n" \ 298 - " add -1,%2;\n" \ 299 - " bne 4b;\n" \ 300 - " mov %0,%2\n" \ 301 - " jmp 2b\n" \ 302 - " .previous\n" \ 303 - " .section __ex_table,\"a\"\n" \ 304 - " .balign 4\n" \ 305 - " .long 0b,3b\n" \ 306 - " .long 1b,3b\n" \ 307 - " .previous\n" \ 308 - : "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\ 309 - : "0"(__from), "1"(__to), "2"(size) \ 310 - : "cc", "memory"); \ 311 - } \ 312 - } while (0) 313 - 314 - /* We let the __ versions of copy_from/to_user inline, because they're often 315 - * used in fast paths and have only a small space overhead. 
316 - */ 317 - static inline 318 - unsigned long __generic_copy_from_user_nocheck(void *to, const void *from, 319 - unsigned long n) 320 - { 321 - __copy_user_zeroing(to, from, n); 322 - return n; 323 - } 324 - 325 - static inline 326 - unsigned long __generic_copy_to_user_nocheck(void *to, const void *from, 327 - unsigned long n) 302 + static inline unsigned long 303 + raw_copy_from_user(void *to, const void __user *from, unsigned long n) 328 304 { 329 305 __copy_user(to, from, n); 330 306 return n; 331 307 } 332 308 333 - 334 - #if 0 335 - #error "don't use - these macros don't increment to & from pointers" 336 - /* Optimize just a little bit when we know the size of the move. */ 337 - #define __constant_copy_user(to, from, size) \ 338 - do { \ 339 - asm volatile( \ 340 - " mov %0,a0;\n" \ 341 - "0: movbu (%1),d3;\n" \ 342 - "1: movbu d3,(%2);\n" \ 343 - " add -1,a0;\n" \ 344 - " bne 0b;\n" \ 345 - "2:;" \ 346 - ".section .fixup,\"ax\"\n" \ 347 - "3: jmp 2b\n" \ 348 - ".previous\n" \ 349 - ".section __ex_table,\"a\"\n" \ 350 - " .balign 4\n" \ 351 - " .long 0b,3b\n" \ 352 - " .long 1b,3b\n" \ 353 - ".previous" \ 354 - : \ 355 - : "d"(size), "d"(to), "d"(from) \ 356 - : "d3", "a0"); \ 357 - } while (0) 358 - 359 - /* Optimize just a little bit when we know the size of the move. 
*/ 360 - #define __constant_copy_user_zeroing(to, from, size) \ 361 - do { \ 362 - asm volatile( \ 363 - " mov %0,a0;\n" \ 364 - "0: movbu (%1),d3;\n" \ 365 - "1: movbu d3,(%2);\n" \ 366 - " add -1,a0;\n" \ 367 - " bne 0b;\n" \ 368 - "2:;" \ 369 - ".section .fixup,\"ax\"\n" \ 370 - "3: jmp 2b\n" \ 371 - ".previous\n" \ 372 - ".section __ex_table,\"a\"\n" \ 373 - " .balign 4\n" \ 374 - " .long 0b,3b\n" \ 375 - " .long 1b,3b\n" \ 376 - ".previous" \ 377 - : \ 378 - : "d"(size), "d"(to), "d"(from) \ 379 - : "d3", "a0"); \ 380 - } while (0) 381 - 382 - static inline 383 - unsigned long __constant_copy_to_user(void *to, const void *from, 384 - unsigned long n) 309 + static inline unsigned long 310 + raw_copy_to_user(void __user *to, const void *from, unsigned long n) 385 311 { 386 - if (access_ok(VERIFY_WRITE, to, n)) 387 - __constant_copy_user(to, from, n); 312 + __copy_user(to, from, n); 388 313 return n; 389 314 } 390 - 391 - static inline 392 - unsigned long __constant_copy_from_user(void *to, const void *from, 393 - unsigned long n) 394 - { 395 - if (access_ok(VERIFY_READ, from, n)) 396 - __constant_copy_user_zeroing(to, from, n); 397 - return n; 398 - } 399 - 400 - static inline 401 - unsigned long __constant_copy_to_user_nocheck(void *to, const void *from, 402 - unsigned long n) 403 - { 404 - __constant_copy_user(to, from, n); 405 - return n; 406 - } 407 - 408 - static inline 409 - unsigned long __constant_copy_from_user_nocheck(void *to, const void *from, 410 - unsigned long n) 411 - { 412 - __constant_copy_user_zeroing(to, from, n); 413 - return n; 414 - } 415 - #endif 416 - 417 - extern unsigned long __generic_copy_to_user(void __user *, const void *, 418 - unsigned long); 419 - extern unsigned long __generic_copy_from_user(void *, const void __user *, 420 - unsigned long); 421 - 422 - #define __copy_to_user_inatomic(to, from, n) \ 423 - __generic_copy_to_user_nocheck((to), (from), (n)) 424 - #define __copy_from_user_inatomic(to, from, n) \ 425 - 
__generic_copy_from_user_nocheck((to), (from), (n)) 426 - 427 - #define __copy_to_user(to, from, n) \ 428 - ({ \ 429 - might_fault(); \ 430 - __copy_to_user_inatomic((to), (from), (n)); \ 431 - }) 432 - 433 - #define __copy_from_user(to, from, n) \ 434 - ({ \ 435 - might_fault(); \ 436 - __copy_from_user_inatomic((to), (from), (n)); \ 437 - }) 438 - 439 - 440 - #define copy_to_user(to, from, n) __generic_copy_to_user((to), (from), (n)) 441 - #define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n)) 442 315 443 316 extern long strncpy_from_user(char *dst, const char __user *src, long count); 444 317 extern long __strncpy_from_user(char *dst, const char __user *src, long count);
-2
arch/mn10300/kernel/mn10300_ksyms.c
··· 26 26 EXPORT_SYMBOL(__strncpy_from_user); 27 27 EXPORT_SYMBOL(clear_user); 28 28 EXPORT_SYMBOL(__clear_user); 29 - EXPORT_SYMBOL(__generic_copy_from_user); 30 - EXPORT_SYMBOL(__generic_copy_to_user); 31 29 EXPORT_SYMBOL(strnlen_user); 32 30 33 31 extern u64 __ashrdi3(u64, unsigned);
-18
arch/mn10300/lib/usercopy.c
··· 11 11 */ 12 12 #include <linux/uaccess.h> 13 13 14 - unsigned long 15 - __generic_copy_to_user(void *to, const void *from, unsigned long n) 16 - { 17 - if (access_ok(VERIFY_WRITE, to, n)) 18 - __copy_user(to, from, n); 19 - return n; 20 - } 21 - 22 - unsigned long 23 - __generic_copy_from_user(void *to, const void *from, unsigned long n) 24 - { 25 - if (access_ok(VERIFY_READ, from, n)) 26 - __copy_user_zeroing(to, from, n); 27 - else 28 - memset(to, 0, n); 29 - return n; 30 - } 31 - 32 14 /* 33 15 * Copy a null terminated string from userspace. 34 16 */
+1
arch/nios2/include/asm/Kbuild
··· 13 13 generic-y += emergency-restart.h 14 14 generic-y += errno.h 15 15 generic-y += exec.h 16 + generic-y += extable.h 16 17 generic-y += fb.h 17 18 generic-y += fcntl.h 18 19 generic-y += ftrace.h
+7 -48
arch/nios2/include/asm/uaccess.h
··· 13 13 #ifndef _ASM_NIOS2_UACCESS_H 14 14 #define _ASM_NIOS2_UACCESS_H 15 15 16 - #include <linux/errno.h> 17 - #include <linux/thread_info.h> 18 16 #include <linux/string.h> 19 17 20 18 #include <asm/page.h> 21 19 22 - #define VERIFY_READ 0 23 - #define VERIFY_WRITE 1 24 - 25 - /* 26 - * The exception table consists of pairs of addresses: the first is the 27 - * address of an instruction that is allowed to fault, and the second is 28 - * the address at which the program should continue. No registers are 29 - * modified, so it is entirely up to the continuation code to figure out 30 - * what to do. 31 - * 32 - * All the routines below use bits of fixup code that are out of line 33 - * with the main instruction path. This means when everything is well, 34 - * we don't even have to jump over them. Further, they do not intrude 35 - * on our cache or tlb entries. 36 - */ 37 - struct exception_table_entry { 38 - unsigned long insn; 39 - unsigned long fixup; 40 - }; 41 - 42 - extern int fixup_exception(struct pt_regs *regs); 20 + #include <asm/extable.h> 43 21 44 22 /* 45 23 * Segment stuff ··· 73 95 return __clear_user(to, n); 74 96 } 75 97 76 - extern long __copy_from_user(void *to, const void __user *from, 77 - unsigned long n); 78 - extern long __copy_to_user(void __user *to, const void *from, unsigned long n); 79 - 80 - static inline long copy_from_user(void *to, const void __user *from, 81 - unsigned long n) 82 - { 83 - unsigned long res = n; 84 - if (access_ok(VERIFY_READ, from, n)) 85 - res = __copy_from_user(to, from, n); 86 - if (unlikely(res)) 87 - memset(to + (n - res), 0, res); 88 - return res; 89 - } 90 - 91 - static inline long copy_to_user(void __user *to, const void *from, 92 - unsigned long n) 93 - { 94 - if (!access_ok(VERIFY_WRITE, to, n)) 95 - return n; 96 - return __copy_to_user(to, from, n); 97 - } 98 + extern unsigned long 99 + raw_copy_from_user(void *to, const void __user *from, unsigned long n); 100 + extern unsigned long 101 + 
raw_copy_to_user(void __user *to, const void *from, unsigned long n); 102 + #define INLINE_COPY_FROM_USER 103 + #define INLINE_COPY_TO_USER 98 104 99 105 extern long strncpy_from_user(char *__to, const char __user *__from, 100 106 long __len); 101 107 extern long strnlen_user(const char __user *s, long n); 102 - 103 - #define __copy_from_user_inatomic __copy_from_user 104 - #define __copy_to_user_inatomic __copy_to_user 105 108 106 109 /* Optimized macros */ 107 110 #define __get_user_asm(val, insn, addr, err) \
+8 -8
arch/nios2/mm/uaccess.c
··· 10 10 #include <linux/export.h> 11 11 #include <linux/uaccess.h> 12 12 13 - asm(".global __copy_from_user\n" 14 - " .type __copy_from_user, @function\n" 15 - "__copy_from_user:\n" 13 + asm(".global raw_copy_from_user\n" 14 + " .type raw_copy_from_user, @function\n" 15 + "raw_copy_from_user:\n" 16 16 " movi r2,7\n" 17 17 " mov r3,r4\n" 18 18 " bge r2,r6,1f\n" ··· 65 65 ".word 7b,13b\n" 66 66 ".previous\n" 67 67 ); 68 - EXPORT_SYMBOL(__copy_from_user); 68 + EXPORT_SYMBOL(raw_copy_from_user); 69 69 70 70 asm( 71 - " .global __copy_to_user\n" 72 - " .type __copy_to_user, @function\n" 73 - "__copy_to_user:\n" 71 + " .global raw_copy_to_user\n" 72 + " .type raw_copy_to_user, @function\n" 73 + "raw_copy_to_user:\n" 74 74 " movi r2,7\n" 75 75 " mov r3,r4\n" 76 76 " bge r2,r6,1f\n" ··· 127 127 ".word 11b,13b\n" 128 128 ".word 12b,13b\n" 129 129 ".previous\n"); 130 - EXPORT_SYMBOL(__copy_to_user); 130 + EXPORT_SYMBOL(raw_copy_to_user); 131 131 132 132 long strncpy_from_user(char *__to, const char __user *__from, long __len) 133 133 {
+1
arch/openrisc/include/asm/Kbuild
··· 16 16 generic-y += emergency-restart.h 17 17 generic-y += errno.h 18 18 generic-y += exec.h 19 + generic-y += extable.h 19 20 generic-y += fb.h 20 21 generic-y += fcntl.h 21 22 generic-y += ftrace.h
+8 -45
arch/openrisc/include/asm/uaccess.h
··· 22 22 /* 23 23 * User space memory access functions 24 24 */ 25 - #include <linux/errno.h> 26 - #include <linux/thread_info.h> 27 25 #include <linux/prefetch.h> 28 26 #include <linux/string.h> 29 27 #include <asm/page.h> 30 - 31 - #define VERIFY_READ 0 32 - #define VERIFY_WRITE 1 28 + #include <asm/extable.h> 33 29 34 30 /* 35 31 * The fs value determines whether argument validity checking should be ··· 60 64 61 65 #define access_ok(type, addr, size) \ 62 66 __range_ok((unsigned long)addr, (unsigned long)size) 63 - 64 - /* 65 - * The exception table consists of pairs of addresses: the first is the 66 - * address of an instruction that is allowed to fault, and the second is 67 - * the address at which the program should continue. No registers are 68 - * modified, so it is entirely up to the continuation code to figure out 69 - * what to do. 70 - * 71 - * All the routines below use bits of fixup code that are out of line 72 - * with the main instruction path. This means when everything is well, 73 - * we don't even have to jump over them. Further, they do not intrude 74 - * on our cache or tlb entries. 75 - */ 76 - 77 - struct exception_table_entry { 78 - unsigned long insn, fixup; 79 - }; 80 67 81 68 /* 82 69 * These are the main single-value transfer routines. 
They automatically ··· 236 257 237 258 extern unsigned long __must_check 238 259 __copy_tofrom_user(void *to, const void *from, unsigned long size); 239 - 240 - #define __copy_from_user(to, from, size) \ 241 - __copy_tofrom_user(to, from, size) 242 - #define __copy_to_user(to, from, size) \ 243 - __copy_tofrom_user(to, from, size) 244 - 245 - #define __copy_to_user_inatomic __copy_to_user 246 - #define __copy_from_user_inatomic __copy_from_user 247 - 248 260 static inline unsigned long 249 - copy_from_user(void *to, const void *from, unsigned long n) 261 + raw_copy_from_user(void *to, const void __user *from, unsigned long size) 250 262 { 251 - unsigned long res = n; 252 - 253 - if (likely(access_ok(VERIFY_READ, from, n))) 254 - res = __copy_tofrom_user(to, from, n); 255 - if (unlikely(res)) 256 - memset(to + (n - res), 0, res); 257 - return res; 263 + return __copy_tofrom_user(to, (__force const void *)from, size); 258 264 } 259 - 260 265 static inline unsigned long 261 - copy_to_user(void *to, const void *from, unsigned long n) 266 + raw_copy_to_user(void *to, const void __user *from, unsigned long size) 262 267 { 263 - if (likely(access_ok(VERIFY_WRITE, to, n))) 264 - n = __copy_tofrom_user(to, from, n); 265 - return n; 268 + return __copy_tofrom_user((__force void *)to, from, size); 266 269 } 270 + #define INLINE_COPY_FROM_USER 271 + #define INLINE_COPY_TO_USER 267 272 268 273 extern unsigned long __clear_user(void *addr, unsigned long size); 269 274 ··· 260 297 } 261 298 262 299 #define user_addr_max() \ 263 - (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) 300 + (uaccess_kernel() ? ~0UL : TASK_SIZE) 264 301 265 302 extern long strncpy_from_user(char *dest, const char __user *src, long count); 266 303
-1
arch/parisc/Kconfig
··· 26 26 select SYSCTL_ARCH_UNALIGN_ALLOW 27 27 select SYSCTL_EXCEPTION_TRACE 28 28 select HAVE_MOD_ARCH_SPECIFIC 29 - select HAVE_ARCH_HARDENED_USERCOPY 30 29 select VIRT_TO_BUS 31 30 select MODULES_USE_ELF_RELA 32 31 select CLONE_BACKWARDS
+1 -1
arch/parisc/include/asm/futex.h
··· 109 109 /* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is 110 110 * our gateway page, and causes no end of trouble... 111 111 */ 112 - if (segment_eq(KERNEL_DS, get_fs()) && !uaddr) 112 + if (uaccess_kernel() && !uaddr) 113 113 return -EFAULT; 114 114 115 115 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+8 -61
arch/parisc/include/asm/uaccess.h
··· 6 6 */ 7 7 #include <asm/page.h> 8 8 #include <asm/cache.h> 9 - #include <asm/errno.h> 10 9 #include <asm-generic/uaccess-unaligned.h> 11 10 12 11 #include <linux/bug.h> 13 12 #include <linux/string.h> 14 - #include <linux/thread_info.h> 15 - 16 - #define VERIFY_READ 0 17 - #define VERIFY_WRITE 1 18 13 19 14 #define KERNEL_DS ((mm_segment_t){0}) 20 15 #define USER_DS ((mm_segment_t){1}) ··· 211 216 * Complex access routines -- external declarations 212 217 */ 213 218 214 - extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long); 215 - extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long); 216 - extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long); 217 219 extern long strncpy_from_user(char *, const char __user *, long); 218 220 extern unsigned lclear_user(void __user *, unsigned long); 219 221 extern long lstrnlen_user(const char __user *, long); ··· 224 232 #define clear_user lclear_user 225 233 #define __clear_user lclear_user 226 234 227 - unsigned long __must_check __copy_to_user(void __user *dst, const void *src, 228 - unsigned long len); 229 - unsigned long __must_check __copy_from_user(void *dst, const void __user *src, 230 - unsigned long len); 231 - unsigned long copy_in_user(void __user *dst, const void __user *src, 232 - unsigned long len); 233 - #define __copy_in_user copy_in_user 234 - #define __copy_to_user_inatomic __copy_to_user 235 - #define __copy_from_user_inatomic __copy_from_user 236 - 237 - extern void __compiletime_error("usercopy buffer size is too small") 238 - __bad_copy_user(void); 239 - 240 - static inline void copy_user_overflow(int size, unsigned long count) 241 - { 242 - WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count); 243 - } 244 - 245 - static __always_inline unsigned long __must_check 246 - copy_from_user(void *to, const void __user *from, unsigned long n) 247 - { 248 - int sz = __compiletime_object_size(to); 249 - unsigned long 
ret = n; 250 - 251 - if (likely(sz < 0 || sz >= n)) { 252 - check_object_size(to, n, false); 253 - ret = __copy_from_user(to, from, n); 254 - } else if (!__builtin_constant_p(n)) 255 - copy_user_overflow(sz, n); 256 - else 257 - __bad_copy_user(); 258 - 259 - if (unlikely(ret)) 260 - memset(to + (n - ret), 0, ret); 261 - 262 - return ret; 263 - } 264 - 265 - static __always_inline unsigned long __must_check 266 - copy_to_user(void __user *to, const void *from, unsigned long n) 267 - { 268 - int sz = __compiletime_object_size(from); 269 - 270 - if (likely(sz < 0 || sz >= n)) { 271 - check_object_size(from, n, true); 272 - n = __copy_to_user(to, from, n); 273 - } else if (!__builtin_constant_p(n)) 274 - copy_user_overflow(sz, n); 275 - else 276 - __bad_copy_user(); 277 - 278 - return n; 279 - } 235 + unsigned long __must_check raw_copy_to_user(void __user *dst, const void *src, 236 + unsigned long len); 237 + unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src, 238 + unsigned long len); 239 + unsigned long __must_check raw_copy_in_user(void __user *dst, const void __user *src, 240 + unsigned long len); 241 + #define INLINE_COPY_TO_USER 242 + #define INLINE_COPY_FROM_USER 280 243 281 244 struct pt_regs; 282 245 int fixup_exception(struct pt_regs *regs);
+8 -8
arch/parisc/lib/memcpy.c
··· 29 29 30 30 DECLARE_PER_CPU(struct exception_data, exception_data); 31 31 32 - #define get_user_space() (segment_eq(get_fs(), KERNEL_DS) ? 0 : mfsp(3)) 32 + #define get_user_space() (uaccess_kernel() ? 0 : mfsp(3)) 33 33 #define get_kernel_space() (0) 34 34 35 35 /* Returns 0 for success, otherwise, returns number of bytes not transferred. */ 36 36 extern unsigned long pa_memcpy(void *dst, const void *src, 37 37 unsigned long len); 38 38 39 - unsigned long __copy_to_user(void __user *dst, const void *src, 40 - unsigned long len) 39 + unsigned long raw_copy_to_user(void __user *dst, const void *src, 40 + unsigned long len) 41 41 { 42 42 mtsp(get_kernel_space(), 1); 43 43 mtsp(get_user_space(), 2); 44 44 return pa_memcpy((void __force *)dst, src, len); 45 45 } 46 - EXPORT_SYMBOL(__copy_to_user); 46 + EXPORT_SYMBOL(raw_copy_to_user); 47 47 48 - unsigned long __copy_from_user(void *dst, const void __user *src, 48 + unsigned long raw_copy_from_user(void *dst, const void __user *src, 49 49 unsigned long len) 50 50 { 51 51 mtsp(get_user_space(), 1); 52 52 mtsp(get_kernel_space(), 2); 53 53 return pa_memcpy(dst, (void __force *)src, len); 54 54 } 55 - EXPORT_SYMBOL(__copy_from_user); 55 + EXPORT_SYMBOL(raw_copy_from_user); 56 56 57 - unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len) 57 + unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long len) 58 58 { 59 59 mtsp(get_user_space(), 1); 60 60 mtsp(get_user_space(), 2); ··· 70 70 return dst; 71 71 } 72 72 73 - EXPORT_SYMBOL(copy_in_user); 73 + EXPORT_SYMBOL(raw_copy_in_user); 74 74 EXPORT_SYMBOL(memcpy); 75 75 76 76 long probe_kernel_read(void *dst, const void *src, size_t size)
-1
arch/powerpc/Kconfig
··· 117 117 select GENERIC_STRNLEN_USER 118 118 select GENERIC_TIME_VSYSCALL_OLD 119 119 select HAVE_ARCH_AUDITSYSCALL 120 - select HAVE_ARCH_HARDENED_USERCOPY 121 120 select HAVE_ARCH_JUMP_LABEL 122 121 select HAVE_ARCH_KGDB 123 122 select HAVE_ARCH_SECCOMP_FILTER
+29
arch/powerpc/include/asm/extable.h
··· 1 + #ifndef _ARCH_POWERPC_EXTABLE_H 2 + #define _ARCH_POWERPC_EXTABLE_H 3 + 4 + /* 5 + * The exception table consists of pairs of relative addresses: the first is 6 + * the address of an instruction that is allowed to fault, and the second is 7 + * the address at which the program should continue. No registers are 8 + * modified, so it is entirely up to the continuation code to figure out what 9 + * to do. 10 + * 11 + * All the routines below use bits of fixup code that are out of line with the 12 + * main instruction path. This means when everything is well, we don't even 13 + * have to jump over them. Further, they do not intrude on our cache or tlb 14 + * entries. 15 + */ 16 + 17 + #define ARCH_HAS_RELATIVE_EXTABLE 18 + 19 + struct exception_table_entry { 20 + int insn; 21 + int fixup; 22 + }; 23 + 24 + static inline unsigned long extable_fixup(const struct exception_table_entry *x) 25 + { 26 + return (unsigned long)&x->fixup + x->fixup; 27 + } 28 + 29 + #endif
+10 -86
arch/powerpc/include/asm/uaccess.h
··· 1 1 #ifndef _ARCH_POWERPC_UACCESS_H 2 2 #define _ARCH_POWERPC_UACCESS_H 3 3 4 - #ifdef __KERNEL__ 5 - #ifndef __ASSEMBLY__ 6 - 7 - #include <linux/sched.h> 8 - #include <linux/errno.h> 9 4 #include <asm/asm-compat.h> 10 5 #include <asm/ppc_asm.h> 11 6 #include <asm/processor.h> 12 7 #include <asm/page.h> 13 - 14 - #define VERIFY_READ 0 15 - #define VERIFY_WRITE 1 8 + #include <asm/extable.h> 16 9 17 10 /* 18 11 * The fs value determines whether argument validity checking should be ··· 55 62 #define access_ok(type, addr, size) \ 56 63 (__chk_user_ptr(addr), \ 57 64 __access_ok((__force unsigned long)(addr), (size), get_fs())) 58 - 59 - /* 60 - * The exception table consists of pairs of relative addresses: the first is 61 - * the address of an instruction that is allowed to fault, and the second is 62 - * the address at which the program should continue. No registers are 63 - * modified, so it is entirely up to the continuation code to figure out what 64 - * to do. 65 - * 66 - * All the routines below use bits of fixup code that are out of line with the 67 - * main instruction path. This means when everything is well, we don't even 68 - * have to jump over them. Further, they do not intrude on our cache or tlb 69 - * entries. 70 - */ 71 - 72 - #define ARCH_HAS_RELATIVE_EXTABLE 73 - 74 - struct exception_table_entry { 75 - int insn; 76 - int fixup; 77 - }; 78 - 79 - static inline unsigned long extable_fixup(const struct exception_table_entry *x) 80 - { 81 - return (unsigned long)&x->fixup + x->fixup; 82 - } 83 65 84 66 /* 85 67 * These are the main single-value transfer routines. 
They automatically ··· 269 301 270 302 #ifndef __powerpc64__ 271 303 272 - static inline unsigned long copy_from_user(void *to, 273 - const void __user *from, unsigned long n) 274 - { 275 - if (likely(access_ok(VERIFY_READ, from, n))) { 276 - check_object_size(to, n, false); 277 - return __copy_tofrom_user((__force void __user *)to, from, n); 278 - } 279 - memset(to, 0, n); 280 - return n; 281 - } 282 - 283 - static inline unsigned long copy_to_user(void __user *to, 284 - const void *from, unsigned long n) 285 - { 286 - if (access_ok(VERIFY_WRITE, to, n)) { 287 - check_object_size(from, n, true); 288 - return __copy_tofrom_user(to, (__force void __user *)from, n); 289 - } 290 - return n; 291 - } 304 + #define INLINE_COPY_FROM_USER 305 + #define INLINE_COPY_TO_USER 292 306 293 307 #else /* __powerpc64__ */ 294 308 295 - #define __copy_in_user(to, from, size) \ 296 - __copy_tofrom_user((to), (from), (size)) 297 - 298 - extern unsigned long copy_from_user(void *to, const void __user *from, 299 - unsigned long n); 300 - extern unsigned long copy_to_user(void __user *to, const void *from, 301 - unsigned long n); 302 - extern unsigned long copy_in_user(void __user *to, const void __user *from, 303 - unsigned long n); 304 - 309 + static inline unsigned long 310 + raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) 311 + { 312 + return __copy_tofrom_user(to, from, n); 313 + } 305 314 #endif /* __powerpc64__ */ 306 315 307 - static inline unsigned long __copy_from_user_inatomic(void *to, 316 + static inline unsigned long raw_copy_from_user(void *to, 308 317 const void __user *from, unsigned long n) 309 318 { 310 319 if (__builtin_constant_p(n) && (n <= 8)) { ··· 305 360 return 0; 306 361 } 307 362 308 - check_object_size(to, n, false); 309 - 310 363 return __copy_tofrom_user((__force void __user *)to, from, n); 311 364 } 312 365 313 - static inline unsigned long __copy_to_user_inatomic(void __user *to, 366 + static inline unsigned long 
raw_copy_to_user(void __user *to, 314 367 const void *from, unsigned long n) 315 368 { 316 369 if (__builtin_constant_p(n) && (n <= 8)) { ··· 332 389 return 0; 333 390 } 334 391 335 - check_object_size(from, n, true); 336 - 337 392 return __copy_tofrom_user(to, (__force const void __user *)from, n); 338 - } 339 - 340 - static inline unsigned long __copy_from_user(void *to, 341 - const void __user *from, unsigned long size) 342 - { 343 - might_fault(); 344 - return __copy_from_user_inatomic(to, from, size); 345 - } 346 - 347 - static inline unsigned long __copy_to_user(void __user *to, 348 - const void *from, unsigned long size) 349 - { 350 - might_fault(); 351 - return __copy_to_user_inatomic(to, from, size); 352 393 } 353 394 354 395 extern unsigned long __clear_user(void __user *addr, unsigned long size); ··· 348 421 extern long strncpy_from_user(char *dst, const char __user *src, long count); 349 422 extern __must_check long strlen_user(const char __user *str); 350 423 extern __must_check long strnlen_user(const char __user *str, long n); 351 - 352 - #endif /* __ASSEMBLY__ */ 353 - #endif /* __KERNEL__ */ 354 424 355 425 #endif /* _ARCH_POWERPC_UACCESS_H */
+1 -1
arch/powerpc/lib/Makefile
··· 14 14 15 15 obj-$(CONFIG_PPC32) += div64.o copy_32.o 16 16 17 - obj64-y += copypage_64.o copyuser_64.o usercopy_64.o mem_64.o hweight_64.o \ 17 + obj64-y += copypage_64.o copyuser_64.o mem_64.o hweight_64.o \ 18 18 copyuser_power7.o string_64.o copypage_power7.o memcpy_power7.o \ 19 19 memcpy_64.o memcmp_64.o 20 20
-14
arch/powerpc/lib/copy_32.S
··· 477 477 bdnz 130b 478 478 /* then clear out the destination: r3 bytes starting at 4(r6) */ 479 479 132: mfctr r3 480 - srwi. r0,r3,2 481 - li r9,0 482 - mtctr r0 483 - beq 113f 484 - 112: stwu r9,4(r6) 485 - bdnz 112b 486 - 113: andi. r0,r3,3 487 - mtctr r0 488 - beq 120f 489 - 114: stb r9,4(r6) 490 - addi r6,r6,1 491 - bdnz 114b 492 480 120: blr 493 481 494 482 EX_TABLE(30b,108b) ··· 485 497 EX_TABLE(41b,111b) 486 498 EX_TABLE(130b,132b) 487 499 EX_TABLE(131b,120b) 488 - EX_TABLE(112b,120b) 489 - EX_TABLE(114b,120b) 490 500 491 501 EXPORT_SYMBOL(__copy_tofrom_user)
+3 -32
arch/powerpc/lib/copyuser_64.S
··· 319 319 blr 320 320 321 321 /* 322 - * here we have trapped again, need to clear ctr bytes starting at r3 322 + * here we have trapped again, amount remaining is in ctr. 323 323 */ 324 - 143: mfctr r5 325 - li r0,0 326 - mr r4,r3 327 - mr r3,r5 /* return the number of bytes not copied */ 328 - 1: andi. r9,r4,7 329 - beq 3f 330 - 90: stb r0,0(r4) 331 - addic. r5,r5,-1 332 - addi r4,r4,1 333 - bne 1b 334 - blr 335 - 3: cmpldi cr1,r5,8 336 - srdi r9,r5,3 337 - andi. r5,r5,7 338 - blt cr1,93f 339 - mtctr r9 340 - 91: std r0,0(r4) 341 - addi r4,r4,8 342 - bdnz 91b 343 - 93: beqlr 344 - mtctr r5 345 - 92: stb r0,0(r4) 346 - addi r4,r4,1 347 - bdnz 92b 324 + 143: mfctr r3 348 325 blr 349 326 350 327 /* ··· 366 389 ld r5,-8(r1) 367 390 add r6,r6,r5 368 391 subf r3,r3,r6 /* #bytes not copied */ 369 - 190: 370 - 191: 371 - 192: 372 - blr /* #bytes not copied in r3 */ 392 + blr 373 393 374 394 EX_TABLE(20b,120b) 375 395 EX_TABLE(220b,320b) ··· 425 451 EX_TABLE(88b,188b) 426 452 EX_TABLE(43b,143b) 427 453 EX_TABLE(89b,189b) 428 - EX_TABLE(90b,190b) 429 - EX_TABLE(91b,191b) 430 - EX_TABLE(92b,192b) 431 454 432 455 /* 433 456 * Routine to copy a whole page of data, optimized for POWER4.
-41
arch/powerpc/lib/usercopy_64.c
··· 1 - /* 2 - * Functions which are too large to be inlined. 3 - * 4 - * This program is free software; you can redistribute it and/or 5 - * modify it under the terms of the GNU General Public License 6 - * as published by the Free Software Foundation; either version 7 - * 2 of the License, or (at your option) any later version. 8 - */ 9 - #include <linux/module.h> 10 - #include <linux/uaccess.h> 11 - 12 - unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) 13 - { 14 - if (likely(access_ok(VERIFY_READ, from, n))) 15 - n = __copy_from_user(to, from, n); 16 - else 17 - memset(to, 0, n); 18 - return n; 19 - } 20 - 21 - unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) 22 - { 23 - if (likely(access_ok(VERIFY_WRITE, to, n))) 24 - n = __copy_to_user(to, from, n); 25 - return n; 26 - } 27 - 28 - unsigned long copy_in_user(void __user *to, const void __user *from, 29 - unsigned long n) 30 - { 31 - might_sleep(); 32 - if (likely(access_ok(VERIFY_READ, from, n) && 33 - access_ok(VERIFY_WRITE, to, n))) 34 - n =__copy_tofrom_user(to, from, n); 35 - return n; 36 - } 37 - 38 - EXPORT_SYMBOL(copy_from_user); 39 - EXPORT_SYMBOL(copy_to_user); 40 - EXPORT_SYMBOL(copy_in_user); 41 -
-1
arch/s390/Kconfig
··· 124 124 select HAVE_ALIGNED_STRUCT_PAGE if SLUB 125 125 select HAVE_ARCH_AUDITSYSCALL 126 126 select HAVE_ARCH_EARLY_PFN_TO_NID 127 - select HAVE_ARCH_HARDENED_USERCOPY 128 127 select HAVE_ARCH_JUMP_LABEL 129 128 select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES 130 129 select HAVE_ARCH_SECCOMP_FILTER
+28
arch/s390/include/asm/extable.h
··· 1 + #ifndef __S390_EXTABLE_H 2 + #define __S390_EXTABLE_H 3 + /* 4 + * The exception table consists of pairs of addresses: the first is the 5 + * address of an instruction that is allowed to fault, and the second is 6 + * the address at which the program should continue. No registers are 7 + * modified, so it is entirely up to the continuation code to figure out 8 + * what to do. 9 + * 10 + * All the routines below use bits of fixup code that are out of line 11 + * with the main instruction path. This means when everything is well, 12 + * we don't even have to jump over them. Further, they do not intrude 13 + * on our cache or tlb entries. 14 + */ 15 + 16 + struct exception_table_entry 17 + { 18 + int insn, fixup; 19 + }; 20 + 21 + static inline unsigned long extable_fixup(const struct exception_table_entry *x) 22 + { 23 + return (unsigned long)&x->fixup + x->fixup; 24 + } 25 + 26 + #define ARCH_HAS_RELATIVE_EXTABLE 27 + 28 + #endif
+11 -142
arch/s390/include/asm/uaccess.h
··· 12 12 /* 13 13 * User space memory access functions 14 14 */ 15 - #include <linux/sched.h> 16 - #include <linux/errno.h> 17 15 #include <asm/processor.h> 18 16 #include <asm/ctl_reg.h> 19 - 20 - #define VERIFY_READ 0 21 - #define VERIFY_WRITE 1 17 + #include <asm/extable.h> 22 18 23 19 24 20 /* ··· 38 42 static inline void set_fs(mm_segment_t fs) 39 43 { 40 44 current->thread.mm_segment = fs; 41 - if (segment_eq(fs, KERNEL_DS)) { 45 + if (uaccess_kernel()) { 42 46 set_cpu_flag(CIF_ASCE_SECONDARY); 43 47 __ctl_load(S390_lowcore.kernel_asce, 7, 7); 44 48 } else { ··· 60 64 61 65 #define access_ok(type, addr, size) __access_ok(addr, size) 62 66 63 - /* 64 - * The exception table consists of pairs of addresses: the first is the 65 - * address of an instruction that is allowed to fault, and the second is 66 - * the address at which the program should continue. No registers are 67 - * modified, so it is entirely up to the continuation code to figure out 68 - * what to do. 69 - * 70 - * All the routines below use bits of fixup code that are out of line 71 - * with the main instruction path. This means when everything is well, 72 - * we don't even have to jump over them. Further, they do not intrude 73 - * on our cache or tlb entries. 74 - */ 67 + unsigned long __must_check 68 + raw_copy_from_user(void *to, const void __user *from, unsigned long n); 75 69 76 - struct exception_table_entry 77 - { 78 - int insn, fixup; 79 - }; 70 + unsigned long __must_check 71 + raw_copy_to_user(void __user *to, const void *from, unsigned long n); 80 72 81 - static inline unsigned long extable_fixup(const struct exception_table_entry *x) 82 - { 83 - return (unsigned long)&x->fixup + x->fixup; 84 - } 85 - 86 - #define ARCH_HAS_RELATIVE_EXTABLE 87 - 88 - /** 89 - * __copy_from_user: - Copy a block of data from user space, with less checking. 90 - * @to: Destination address, in kernel space. 91 - * @from: Source address, in user space. 92 - * @n: Number of bytes to copy. 
93 - * 94 - * Context: User context only. This function may sleep if pagefaults are 95 - * enabled. 96 - * 97 - * Copy data from user space to kernel space. Caller must check 98 - * the specified block with access_ok() before calling this function. 99 - * 100 - * Returns number of bytes that could not be copied. 101 - * On success, this will be zero. 102 - * 103 - * If some data could not be copied, this function will pad the copied 104 - * data to the requested size using zero bytes. 105 - */ 106 - unsigned long __must_check __copy_from_user(void *to, const void __user *from, 107 - unsigned long n); 108 - 109 - /** 110 - * __copy_to_user: - Copy a block of data into user space, with less checking. 111 - * @to: Destination address, in user space. 112 - * @from: Source address, in kernel space. 113 - * @n: Number of bytes to copy. 114 - * 115 - * Context: User context only. This function may sleep if pagefaults are 116 - * enabled. 117 - * 118 - * Copy data from kernel space to user space. Caller must check 119 - * the specified block with access_ok() before calling this function. 120 - * 121 - * Returns number of bytes that could not be copied. 122 - * On success, this will be zero. 123 - */ 124 - unsigned long __must_check __copy_to_user(void __user *to, const void *from, 125 - unsigned long n); 126 - 127 - #define __copy_to_user_inatomic __copy_to_user 128 - #define __copy_from_user_inatomic __copy_from_user 73 + #define INLINE_COPY_FROM_USER 74 + #define INLINE_COPY_TO_USER 129 75 130 76 #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES 131 77 ··· 156 218 157 219 static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size) 158 220 { 159 - size = __copy_to_user(ptr, x, size); 221 + size = raw_copy_to_user(ptr, x, size); 160 222 return size ? 
-EFAULT : 0; 161 223 } 162 224 163 225 static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size) 164 226 { 165 - size = __copy_from_user(x, ptr, size); 227 + size = raw_copy_from_user(x, ptr, size); 166 228 return size ? -EFAULT : 0; 167 229 } 168 230 ··· 252 314 #define __put_user_unaligned __put_user 253 315 #define __get_user_unaligned __get_user 254 316 255 - extern void __compiletime_error("usercopy buffer size is too small") 256 - __bad_copy_user(void); 257 - 258 - static inline void copy_user_overflow(int size, unsigned long count) 259 - { 260 - WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count); 261 - } 262 - 263 - /** 264 - * copy_to_user: - Copy a block of data into user space. 265 - * @to: Destination address, in user space. 266 - * @from: Source address, in kernel space. 267 - * @n: Number of bytes to copy. 268 - * 269 - * Context: User context only. This function may sleep if pagefaults are 270 - * enabled. 271 - * 272 - * Copy data from kernel space to user space. 273 - * 274 - * Returns number of bytes that could not be copied. 275 - * On success, this will be zero. 276 - */ 277 - static inline unsigned long __must_check 278 - copy_to_user(void __user *to, const void *from, unsigned long n) 279 - { 280 - might_fault(); 281 - return __copy_to_user(to, from, n); 282 - } 283 - 284 - /** 285 - * copy_from_user: - Copy a block of data from user space. 286 - * @to: Destination address, in kernel space. 287 - * @from: Source address, in user space. 288 - * @n: Number of bytes to copy. 289 - * 290 - * Context: User context only. This function may sleep if pagefaults are 291 - * enabled. 292 - * 293 - * Copy data from user space to kernel space. 294 - * 295 - * Returns number of bytes that could not be copied. 296 - * On success, this will be zero. 297 - * 298 - * If some data could not be copied, this function will pad the copied 299 - * data to the requested size using zero bytes. 
300 - */ 301 - static inline unsigned long __must_check 302 - copy_from_user(void *to, const void __user *from, unsigned long n) 303 - { 304 - unsigned int sz = __compiletime_object_size(to); 305 - 306 - might_fault(); 307 - if (unlikely(sz != -1 && sz < n)) { 308 - if (!__builtin_constant_p(n)) 309 - copy_user_overflow(sz, n); 310 - else 311 - __bad_copy_user(); 312 - return n; 313 - } 314 - return __copy_from_user(to, from, n); 315 - } 316 - 317 317 unsigned long __must_check 318 - __copy_in_user(void __user *to, const void __user *from, unsigned long n); 319 - 320 - static inline unsigned long __must_check 321 - copy_in_user(void __user *to, const void __user *from, unsigned long n) 322 - { 323 - might_fault(); 324 - return __copy_in_user(to, from, n); 325 - } 318 + raw_copy_in_user(void __user *to, const void __user *from, unsigned long n); 326 319 327 320 /* 328 321 * Copy a null terminated string from userspace.
+23 -45
arch/s390/lib/uaccess.c
··· 26 26 tmp1 = -4096UL; 27 27 asm volatile( 28 28 "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n" 29 - "9: jz 7f\n" 29 + "6: jz 4f\n" 30 30 "1: algr %0,%3\n" 31 31 " slgr %1,%3\n" 32 32 " slgr %2,%3\n" ··· 35 35 " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */ 36 36 " slgr %4,%1\n" 37 37 " clgr %0,%4\n" /* copy crosses next page boundary? */ 38 - " jnh 4f\n" 38 + " jnh 5f\n" 39 39 "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n" 40 - "10:slgr %0,%4\n" 41 - " algr %2,%4\n" 42 - "4: lghi %4,-1\n" 43 - " algr %4,%0\n" /* copy remaining size, subtract 1 */ 44 - " bras %3,6f\n" /* memset loop */ 45 - " xc 0(1,%2),0(%2)\n" 46 - "5: xc 0(256,%2),0(%2)\n" 47 - " la %2,256(%2)\n" 48 - "6: aghi %4,-256\n" 49 - " jnm 5b\n" 50 - " ex %4,0(%3)\n" 51 - " j 8f\n" 52 - "7: slgr %0,%0\n" 53 - "8:\n" 54 - EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b) 40 + "7: slgr %0,%4\n" 41 + " j 5f\n" 42 + "4: slgr %0,%0\n" 43 + "5:\n" 44 + EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b) 55 45 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) 56 46 : "d" (reg0) : "cc", "memory"); 57 47 return size; ··· 57 67 asm volatile( 58 68 " sacf 0\n" 59 69 "0: mvcp 0(%0,%2),0(%1),%3\n" 60 - "10:jz 8f\n" 70 + "7: jz 5f\n" 61 71 "1: algr %0,%3\n" 62 72 " la %1,256(%1)\n" 63 73 " la %2,256(%2)\n" 64 74 "2: mvcp 0(%0,%2),0(%1),%3\n" 65 - "11:jnz 1b\n" 66 - " j 8f\n" 75 + "8: jnz 1b\n" 76 + " j 5f\n" 67 77 "3: la %4,255(%1)\n" /* %4 = ptr + 255 */ 68 78 " lghi %3,-4096\n" 69 79 " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */ 70 80 " slgr %4,%1\n" 71 81 " clgr %0,%4\n" /* copy crosses next page boundary? 
*/ 72 - " jnh 5f\n" 82 + " jnh 6f\n" 73 83 "4: mvcp 0(%4,%2),0(%1),%3\n" 74 - "12:slgr %0,%4\n" 75 - " algr %2,%4\n" 76 - "5: lghi %4,-1\n" 77 - " algr %4,%0\n" /* copy remaining size, subtract 1 */ 78 - " bras %3,7f\n" /* memset loop */ 79 - " xc 0(1,%2),0(%2)\n" 80 - "6: xc 0(256,%2),0(%2)\n" 81 - " la %2,256(%2)\n" 82 - "7: aghi %4,-256\n" 83 - " jnm 6b\n" 84 - " ex %4,0(%3)\n" 85 - " j 9f\n" 86 - "8: slgr %0,%0\n" 87 - "9: sacf 768\n" 88 - EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b) 89 - EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b) 84 + "9: slgr %0,%4\n" 85 + " j 6f\n" 86 + "5: slgr %0,%0\n" 87 + "6: sacf 768\n" 88 + EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b) 89 + EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b) 90 90 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) 91 91 : : "cc", "memory"); 92 92 return size; 93 93 } 94 94 95 - unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) 95 + unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) 96 96 { 97 - check_object_size(to, n, false); 98 97 if (static_branch_likely(&have_mvcos)) 99 98 return copy_from_user_mvcos(to, from, n); 100 99 return copy_from_user_mvcp(to, from, n); 101 100 } 102 - EXPORT_SYMBOL(__copy_from_user); 101 + EXPORT_SYMBOL(raw_copy_from_user); 103 102 104 103 static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x, 105 104 unsigned long size) ··· 155 176 return size; 156 177 } 157 178 158 - unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) 179 + unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n) 159 180 { 160 - check_object_size(from, n, true); 161 181 if (static_branch_likely(&have_mvcos)) 162 182 return copy_to_user_mvcos(to, from, n); 163 183 return copy_to_user_mvcs(to, from, n); 164 184 } 165 - EXPORT_SYMBOL(__copy_to_user); 185 + EXPORT_SYMBOL(raw_copy_to_user); 166 186 167 187 static inline unsigned long 
copy_in_user_mvcos(void __user *to, const void __user *from, 168 188 unsigned long size) ··· 218 240 return size; 219 241 } 220 242 221 - unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n) 243 + unsigned long raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) 222 244 { 223 245 if (static_branch_likely(&have_mvcos)) 224 246 return copy_in_user_mvcos(to, from, n); 225 247 return copy_in_user_mvc(to, from, n); 226 248 } 227 - EXPORT_SYMBOL(__copy_in_user); 249 + EXPORT_SYMBOL(raw_copy_in_user); 228 250 229 251 static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size) 230 252 {
+1
arch/score/include/asm/Kbuild
··· 4 4 generic-y += barrier.h 5 5 generic-y += clkdev.h 6 6 generic-y += current.h 7 + generic-y += extable.h 7 8 generic-y += irq_work.h 8 9 generic-y += mcs_spinlock.h 9 10 generic-y += mm-arch-hooks.h
-11
arch/score/include/asm/extable.h
··· 1 - #ifndef _ASM_SCORE_EXTABLE_H 2 - #define _ASM_SCORE_EXTABLE_H 3 - 4 - struct exception_table_entry { 5 - unsigned long insn; 6 - unsigned long fixup; 7 - }; 8 - 9 - struct pt_regs; 10 - extern int fixup_exception(struct pt_regs *regs); 11 - #endif
+6 -53
arch/score/include/asm/uaccess.h
··· 2 2 #define __SCORE_UACCESS_H 3 3 4 4 #include <linux/kernel.h> 5 - #include <linux/errno.h> 6 - #include <linux/thread_info.h> 7 5 #include <asm/extable.h> 8 - 9 - #define VERIFY_READ 0 10 - #define VERIFY_WRITE 1 11 6 12 7 #define get_ds() (KERNEL_DS) 13 8 #define get_fs() (current_thread_info()->addr_limit) ··· 295 300 extern int __copy_tofrom_user(void *to, const void *from, unsigned long len); 296 301 297 302 static inline unsigned long 298 - copy_from_user(void *to, const void *from, unsigned long len) 303 + raw_copy_from_user(void *to, const void __user *from, unsigned long len) 299 304 { 300 - unsigned long res = len; 301 - 302 - if (likely(access_ok(VERIFY_READ, from, len))) 303 - res = __copy_tofrom_user(to, from, len); 304 - 305 - if (unlikely(res)) 306 - memset(to + (len - res), 0, res); 307 - 308 - return res; 305 + return __copy_tofrom_user(to, (__force const void *)from, len); 309 306 } 310 307 311 308 static inline unsigned long 312 - copy_to_user(void *to, const void *from, unsigned long len) 309 + raw_copy_to_user(void __user *to, const void *from, unsigned long len) 313 310 { 314 - if (likely(access_ok(VERIFY_WRITE, to, len))) 315 - len = __copy_tofrom_user(to, from, len); 316 - 317 - return len; 311 + return __copy_tofrom_user((__force void *)to, from, len); 318 312 } 319 313 320 - static inline unsigned long 321 - __copy_from_user(void *to, const void *from, unsigned long len) 322 - { 323 - unsigned long left = __copy_tofrom_user(to, from, len); 324 - if (unlikely(left)) 325 - memset(to + (len - left), 0, left); 326 - return left; 327 - } 328 - 329 - #define __copy_to_user(to, from, len) \ 330 - __copy_tofrom_user((to), (from), (len)) 331 - 332 - static inline unsigned long 333 - __copy_to_user_inatomic(void *to, const void *from, unsigned long len) 334 - { 335 - return __copy_to_user(to, from, len); 336 - } 337 - 338 - static inline unsigned long 339 - __copy_from_user_inatomic(void *to, const void *from, unsigned long len) 340 - { 341 - 
return __copy_tofrom_user(to, from, len); 342 - } 343 - 344 - #define __copy_in_user(to, from, len) __copy_tofrom_user(to, from, len) 345 - 346 - static inline unsigned long 347 - copy_in_user(void *to, const void *from, unsigned long len) 348 - { 349 - if (access_ok(VERIFY_READ, from, len) && 350 - access_ok(VERFITY_WRITE, to, len)) 351 - return __copy_tofrom_user(to, from, len); 352 - } 314 + #define INLINE_COPY_FROM_USER 315 + #define INLINE_COPY_TO_USER 353 316 354 317 /* 355 318 * __clear_user: - Zero a block of memory in user space, with less checking.
+10
arch/sh/include/asm/extable.h
··· 1 + #ifndef __ASM_SH_EXTABLE_H 2 + #define __ASM_SH_EXTABLE_H 3 + 4 + #include <asm-generic/extable.h> 5 + 6 + #if defined(CONFIG_SUPERH64) && defined(CONFIG_MMU) 7 + #define ARCH_HAS_SEARCH_EXTABLE 8 + #endif 9 + 10 + #endif
+5 -59
arch/sh/include/asm/uaccess.h
··· 1 1 #ifndef __ASM_SH_UACCESS_H 2 2 #define __ASM_SH_UACCESS_H 3 3 4 - #include <linux/errno.h> 5 - #include <linux/sched.h> 6 4 #include <asm/segment.h> 7 - 8 - #define VERIFY_READ 0 9 - #define VERIFY_WRITE 1 5 + #include <asm/extable.h> 10 6 11 7 #define __addr_ok(addr) \ 12 8 ((unsigned long __force)(addr) < current_thread_info()->addr_limit.seg) ··· 108 112 __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n); 109 113 110 114 static __always_inline unsigned long 111 - __copy_from_user(void *to, const void __user *from, unsigned long n) 115 + raw_copy_from_user(void *to, const void __user *from, unsigned long n) 112 116 { 113 117 return __copy_user(to, (__force void *)from, n); 114 118 } 115 119 116 120 static __always_inline unsigned long __must_check 117 - __copy_to_user(void __user *to, const void *from, unsigned long n) 121 + raw_copy_to_user(void __user *to, const void *from, unsigned long n) 118 122 { 119 123 return __copy_user((__force void *)to, from, n); 120 124 } 121 - 122 - #define __copy_to_user_inatomic __copy_to_user 123 - #define __copy_from_user_inatomic __copy_from_user 125 + #define INLINE_COPY_FROM_USER 126 + #define INLINE_COPY_TO_USER 124 127 125 128 /* 126 129 * Clear the area and return remaining number of bytes ··· 138 143 \ 139 144 __cl_size; \ 140 145 }) 141 - 142 - static inline unsigned long 143 - copy_from_user(void *to, const void __user *from, unsigned long n) 144 - { 145 - unsigned long __copy_from = (unsigned long) from; 146 - __kernel_size_t __copy_size = (__kernel_size_t) n; 147 - 148 - if (__copy_size && __access_ok(__copy_from, __copy_size)) 149 - __copy_size = __copy_user(to, from, __copy_size); 150 - 151 - if (unlikely(__copy_size)) 152 - memset(to + (n - __copy_size), 0, __copy_size); 153 - 154 - return __copy_size; 155 - } 156 - 157 - static inline unsigned long 158 - copy_to_user(void __user *to, const void *from, unsigned long n) 159 - { 160 - unsigned long __copy_to = (unsigned long) to; 161 - 
__kernel_size_t __copy_size = (__kernel_size_t) n; 162 - 163 - if (__copy_size && __access_ok(__copy_to, __copy_size)) 164 - return __copy_user(to, from, __copy_size); 165 - 166 - return __copy_size; 167 - } 168 - 169 - /* 170 - * The exception table consists of pairs of addresses: the first is the 171 - * address of an instruction that is allowed to fault, and the second is 172 - * the address at which the program should continue. No registers are 173 - * modified, so it is entirely up to the continuation code to figure out 174 - * what to do. 175 - * 176 - * All the routines below use bits of fixup code that are out of line 177 - * with the main instruction path. This means when everything is well, 178 - * we don't even have to jump over them. Further, they do not intrude 179 - * on our cache or tlb entries. 180 - */ 181 - struct exception_table_entry { 182 - unsigned long insn, fixup; 183 - }; 184 - 185 - #if defined(CONFIG_SUPERH64) && defined(CONFIG_MMU) 186 - #define ARCH_HAS_SEARCH_EXTABLE 187 - #endif 188 - 189 - int fixup_exception(struct pt_regs *regs); 190 146 191 147 extern void *set_exception_table_vec(unsigned int vec, void *handler); 192 148
-1
arch/sparc/Kconfig
··· 42 42 select OLD_SIGSUSPEND 43 43 select ARCH_HAS_SG_CHAIN 44 44 select CPU_NO_EFFICIENT_FFS 45 - select HAVE_ARCH_HARDENED_USERCOPY 46 45 select LOCKDEP_SMALL if LOCKDEP 47 46 select ARCH_WANT_RELAX_ORDER 48 47
+1 -1
arch/sparc/include/asm/uaccess.h
··· 7 7 #endif 8 8 9 9 #define user_addr_max() \ 10 - (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) 10 + (uaccess_kernel() ? ~0UL : TASK_SIZE) 11 11 12 12 long strncpy_from_user(char *dest, const char __user *src, long count); 13 13
+5 -39
arch/sparc/include/asm/uaccess_32.h
··· 7 7 #ifndef _ASM_UACCESS_H 8 8 #define _ASM_UACCESS_H 9 9 10 - #ifdef __KERNEL__ 11 10 #include <linux/compiler.h> 12 - #include <linux/sched.h> 13 11 #include <linux/string.h> 14 - #include <linux/errno.h> 15 - #endif 16 - 17 - #ifndef __ASSEMBLY__ 18 12 19 13 #include <asm/processor.h> 20 14 ··· 24 30 #define KERNEL_DS ((mm_segment_t) { 0 }) 25 31 #define USER_DS ((mm_segment_t) { -1 }) 26 32 27 - #define VERIFY_READ 0 28 - #define VERIFY_WRITE 1 29 - 30 33 #define get_ds() (KERNEL_DS) 31 34 #define get_fs() (current->thread.current_ds) 32 35 #define set_fs(val) ((current->thread.current_ds) = (val)) ··· 36 45 * large size and address near to PAGE_OFFSET - a fault will break his intentions. 37 46 */ 38 47 #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; }) 39 - #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) 48 + #define __kernel_ok (uaccess_kernel()) 40 49 #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size))) 41 50 #define access_ok(type, addr, size) \ 42 51 ({ (void)(type); __access_ok((unsigned long)(addr), size); }) ··· 70 79 71 80 /* Returns 0 if exception not found and fixup otherwise. */ 72 81 unsigned long search_extables_range(unsigned long addr, unsigned long *g2); 73 - 74 - void __ret_efault(void); 75 82 76 83 /* Uh, these should become the main single-value transfer routines.. 
77 84 * They automatically use the right size if we just have the right ··· 235 246 236 247 unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size); 237 248 238 - static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) 249 + static inline unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n) 239 250 { 240 - if (n && __access_ok((unsigned long) to, n)) { 241 - check_object_size(from, n, true); 242 - return __copy_user(to, (__force void __user *) from, n); 243 - } else 244 - return n; 245 - } 246 - 247 - static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) 248 - { 249 - check_object_size(from, n, true); 250 251 return __copy_user(to, (__force void __user *) from, n); 251 252 } 252 253 253 - static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) 254 - { 255 - if (n && __access_ok((unsigned long) from, n)) { 256 - check_object_size(to, n, false); 257 - return __copy_user((__force void __user *) to, from, n); 258 - } else { 259 - memset(to, 0, n); 260 - return n; 261 - } 262 - } 263 - 264 - static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) 254 + static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) 265 255 { 266 256 return __copy_user((__force void __user *) to, from, n); 267 257 } 268 258 269 - #define __copy_to_user_inatomic __copy_to_user 270 - #define __copy_from_user_inatomic __copy_from_user 259 + #define INLINE_COPY_FROM_USER 260 + #define INLINE_COPY_TO_USER 271 261 272 262 static inline unsigned long __clear_user(void __user *addr, unsigned long size) 273 263 { ··· 279 311 280 312 __must_check long strlen_user(const char __user *str); 281 313 __must_check long strnlen_user(const char __user *str, long n); 282 - 283 - #endif /* __ASSEMBLY__ */ 284 314 285 315 #endif /* _ASM_UACCESS_H */
+5 -39
arch/sparc/include/asm/uaccess_64.h
··· 5 5 * User space memory access functions 6 6 */ 7 7 8 - #ifdef __KERNEL__ 9 - #include <linux/errno.h> 10 8 #include <linux/compiler.h> 11 9 #include <linux/string.h> 12 - #include <linux/thread_info.h> 13 10 #include <asm/asi.h> 14 11 #include <asm/spitfire.h> 15 12 #include <asm-generic/uaccess-unaligned.h> 16 13 #include <asm/extable_64.h> 17 - #endif 18 - 19 - #ifndef __ASSEMBLY__ 20 14 21 15 #include <asm/processor.h> 22 16 ··· 29 35 30 36 #define KERNEL_DS ((mm_segment_t) { ASI_P }) 31 37 #define USER_DS ((mm_segment_t) { ASI_AIUS }) /* har har har */ 32 - 33 - #define VERIFY_READ 0 34 - #define VERIFY_WRITE 1 35 38 36 39 #define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)}) 37 40 #define get_ds() (KERNEL_DS) ··· 176 185 177 186 int __get_user_bad(void); 178 187 179 - unsigned long __must_check ___copy_from_user(void *to, 188 + unsigned long __must_check raw_copy_from_user(void *to, 180 189 const void __user *from, 181 190 unsigned long size); 182 - static inline unsigned long __must_check 183 - copy_from_user(void *to, const void __user *from, unsigned long size) 184 - { 185 - check_object_size(to, size, false); 186 191 187 - return ___copy_from_user(to, from, size); 188 - } 189 - #define __copy_from_user copy_from_user 190 - 191 - unsigned long __must_check ___copy_to_user(void __user *to, 192 + unsigned long __must_check raw_copy_to_user(void __user *to, 192 193 const void *from, 193 194 unsigned long size); 194 - static inline unsigned long __must_check 195 - copy_to_user(void __user *to, const void *from, unsigned long size) 196 - { 197 - check_object_size(from, size, true); 195 + #define INLINE_COPY_FROM_USER 196 + #define INLINE_COPY_TO_USER 198 197 199 - return ___copy_to_user(to, from, size); 200 - } 201 - #define __copy_to_user copy_to_user 202 - 203 - unsigned long __must_check ___copy_in_user(void __user *to, 198 + unsigned long __must_check raw_copy_in_user(void __user *to, 204 199 const void __user *from, 205 200 unsigned 
long size); 206 - static inline unsigned long __must_check 207 - copy_in_user(void __user *to, void __user *from, unsigned long size) 208 - { 209 - return ___copy_in_user(to, from, size); 210 - } 211 - #define __copy_in_user copy_in_user 212 201 213 202 unsigned long __must_check __clear_user(void __user *, unsigned long); 214 203 ··· 197 226 __must_check long strlen_user(const char __user *str); 198 227 __must_check long strnlen_user(const char __user *str, long n); 199 228 200 - #define __copy_to_user_inatomic __copy_to_user 201 - #define __copy_from_user_inatomic __copy_from_user 202 - 203 229 struct pt_regs; 204 230 unsigned long compute_effective_address(struct pt_regs *, 205 231 unsigned int insn, 206 232 unsigned int rd); 207 - 208 - #endif /* __ASSEMBLY__ */ 209 233 210 234 #endif /* _ASM_UACCESS_H */
-7
arch/sparc/kernel/head_32.S
··· 809 809 .word 0 810 810 .word 0 811 811 .word t_irq14 812 - 813 - .section ".fixup",#alloc,#execinstr 814 - .globl __ret_efault 815 - __ret_efault: 816 - ret 817 - restore %g0, -EFAULT, %o0 818 - EXPORT_SYMBOL(__ret_efault)
+1 -1
arch/sparc/lib/GENcopy_from_user.S
··· 23 23 #define PREAMBLE \ 24 24 rd %asi, %g1; \ 25 25 cmp %g1, ASI_AIUS; \ 26 - bne,pn %icc, ___copy_in_user; \ 26 + bne,pn %icc, raw_copy_in_user; \ 27 27 nop 28 28 #endif 29 29
+1 -1
arch/sparc/lib/GENcopy_to_user.S
··· 27 27 #define PREAMBLE \ 28 28 rd %asi, %g1; \ 29 29 cmp %g1, ASI_AIUS; \ 30 - bne,pn %icc, ___copy_in_user; \ 30 + bne,pn %icc, raw_copy_in_user; \ 31 31 nop 32 32 #endif 33 33
+2 -2
arch/sparc/lib/GENpatch.S
··· 26 26 .type generic_patch_copyops,#function 27 27 generic_patch_copyops: 28 28 GEN_DO_PATCH(memcpy, GENmemcpy) 29 - GEN_DO_PATCH(___copy_from_user, GENcopy_from_user) 30 - GEN_DO_PATCH(___copy_to_user, GENcopy_to_user) 29 + GEN_DO_PATCH(raw_copy_from_user, GENcopy_from_user) 30 + GEN_DO_PATCH(raw_copy_to_user, GENcopy_to_user) 31 31 retl 32 32 nop 33 33 .size generic_patch_copyops,.-generic_patch_copyops
+1 -1
arch/sparc/lib/NG2copy_from_user.S
··· 36 36 #define PREAMBLE \ 37 37 rd %asi, %g1; \ 38 38 cmp %g1, ASI_AIUS; \ 39 - bne,pn %icc, ___copy_in_user; \ 39 + bne,pn %icc, raw_copy_in_user; \ 40 40 nop 41 41 #endif 42 42
+1 -1
arch/sparc/lib/NG2copy_to_user.S
··· 45 45 #define PREAMBLE \ 46 46 rd %asi, %g1; \ 47 47 cmp %g1, ASI_AIUS; \ 48 - bne,pn %icc, ___copy_in_user; \ 48 + bne,pn %icc, raw_copy_in_user; \ 49 49 nop 50 50 #endif 51 51
+2 -2
arch/sparc/lib/NG2patch.S
··· 26 26 .type niagara2_patch_copyops,#function 27 27 niagara2_patch_copyops: 28 28 NG_DO_PATCH(memcpy, NG2memcpy) 29 - NG_DO_PATCH(___copy_from_user, NG2copy_from_user) 30 - NG_DO_PATCH(___copy_to_user, NG2copy_to_user) 29 + NG_DO_PATCH(raw_copy_from_user, NG2copy_from_user) 30 + NG_DO_PATCH(raw_copy_to_user, NG2copy_to_user) 31 31 retl 32 32 nop 33 33 .size niagara2_patch_copyops,.-niagara2_patch_copyops
+1 -1
arch/sparc/lib/NG4copy_from_user.S
··· 31 31 #define PREAMBLE \ 32 32 rd %asi, %g1; \ 33 33 cmp %g1, ASI_AIUS; \ 34 - bne,pn %icc, ___copy_in_user; \ 34 + bne,pn %icc, raw_copy_in_user; \ 35 35 nop 36 36 #endif 37 37
+1 -1
arch/sparc/lib/NG4copy_to_user.S
··· 40 40 #define PREAMBLE \ 41 41 rd %asi, %g1; \ 42 42 cmp %g1, ASI_AIUS; \ 43 - bne,pn %icc, ___copy_in_user; \ 43 + bne,pn %icc, raw_copy_in_user; \ 44 44 nop 45 45 #endif 46 46
+2 -2
arch/sparc/lib/NG4patch.S
··· 26 26 .type niagara4_patch_copyops,#function 27 27 niagara4_patch_copyops: 28 28 NG_DO_PATCH(memcpy, NG4memcpy) 29 - NG_DO_PATCH(___copy_from_user, NG4copy_from_user) 30 - NG_DO_PATCH(___copy_to_user, NG4copy_to_user) 29 + NG_DO_PATCH(raw_copy_from_user, NG4copy_from_user) 30 + NG_DO_PATCH(raw_copy_to_user, NG4copy_to_user) 31 31 retl 32 32 nop 33 33 .size niagara4_patch_copyops,.-niagara4_patch_copyops
+1 -1
arch/sparc/lib/NGcopy_from_user.S
··· 25 25 #define PREAMBLE \ 26 26 rd %asi, %g1; \ 27 27 cmp %g1, ASI_AIUS; \ 28 - bne,pn %icc, ___copy_in_user; \ 28 + bne,pn %icc, raw_copy_in_user; \ 29 29 nop 30 30 #endif 31 31
+1 -1
arch/sparc/lib/NGcopy_to_user.S
··· 28 28 #define PREAMBLE \ 29 29 rd %asi, %g1; \ 30 30 cmp %g1, ASI_AIUS; \ 31 - bne,pn %icc, ___copy_in_user; \ 31 + bne,pn %icc, raw_copy_in_user; \ 32 32 nop 33 33 #endif 34 34
+2 -2
arch/sparc/lib/NGpatch.S
··· 26 26 .type niagara_patch_copyops,#function 27 27 niagara_patch_copyops: 28 28 NG_DO_PATCH(memcpy, NGmemcpy) 29 - NG_DO_PATCH(___copy_from_user, NGcopy_from_user) 30 - NG_DO_PATCH(___copy_to_user, NGcopy_to_user) 29 + NG_DO_PATCH(raw_copy_from_user, NGcopy_from_user) 30 + NG_DO_PATCH(raw_copy_to_user, NGcopy_to_user) 31 31 retl 32 32 nop 33 33 .size niagara_patch_copyops,.-niagara_patch_copyops
+2 -2
arch/sparc/lib/U1copy_from_user.S
··· 19 19 .text; \ 20 20 .align 4; 21 21 22 - #define FUNC_NAME ___copy_from_user 22 + #define FUNC_NAME raw_copy_from_user 23 23 #define LOAD(type,addr,dest) type##a [addr] %asi, dest 24 24 #define LOAD_BLK(addr,dest) ldda [addr] ASI_BLK_AIUS, dest 25 25 #define EX_RETVAL(x) 0 ··· 31 31 #define PREAMBLE \ 32 32 rd %asi, %g1; \ 33 33 cmp %g1, ASI_AIUS; \ 34 - bne,pn %icc, ___copy_in_user; \ 34 + bne,pn %icc, raw_copy_in_user; \ 35 35 nop; \ 36 36 37 37 #include "U1memcpy.S"
+2 -2
arch/sparc/lib/U1copy_to_user.S
··· 19 19 .text; \ 20 20 .align 4; 21 21 22 - #define FUNC_NAME ___copy_to_user 22 + #define FUNC_NAME raw_copy_to_user 23 23 #define STORE(type,src,addr) type##a src, [addr] ASI_AIUS 24 24 #define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_AIUS 25 25 #define EX_RETVAL(x) 0 ··· 31 31 #define PREAMBLE \ 32 32 rd %asi, %g1; \ 33 33 cmp %g1, ASI_AIUS; \ 34 - bne,pn %icc, ___copy_in_user; \ 34 + bne,pn %icc, raw_copy_in_user; \ 35 35 nop; \ 36 36 37 37 #include "U1memcpy.S"
+1 -1
arch/sparc/lib/U3copy_to_user.S
··· 31 31 #define PREAMBLE \ 32 32 rd %asi, %g1; \ 33 33 cmp %g1, ASI_AIUS; \ 34 - bne,pn %icc, ___copy_in_user; \ 34 + bne,pn %icc, raw_copy_in_user; \ 35 35 nop; \ 36 36 37 37 #include "U3memcpy.S"
+2 -2
arch/sparc/lib/U3patch.S
··· 26 26 .type cheetah_patch_copyops,#function 27 27 cheetah_patch_copyops: 28 28 ULTRA3_DO_PATCH(memcpy, U3memcpy) 29 - ULTRA3_DO_PATCH(___copy_from_user, U3copy_from_user) 30 - ULTRA3_DO_PATCH(___copy_to_user, U3copy_to_user) 29 + ULTRA3_DO_PATCH(raw_copy_from_user, U3copy_from_user) 30 + ULTRA3_DO_PATCH(raw_copy_to_user, U3copy_to_user) 31 31 retl 32 32 nop 33 33 .size cheetah_patch_copyops,.-cheetah_patch_copyops
+3 -3
arch/sparc/lib/copy_in_user.S
··· 44 44 * to copy register windows around during thread cloning. 45 45 */ 46 46 47 - ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */ 47 + ENTRY(raw_copy_in_user) /* %o0=dst, %o1=src, %o2=len */ 48 48 cmp %o2, 0 49 49 be,pn %XCC, 85f 50 50 or %o0, %o1, %o3 ··· 105 105 add %o0, 1, %o0 106 106 retl 107 107 clr %o0 108 - ENDPROC(___copy_in_user) 109 - EXPORT_SYMBOL(___copy_in_user) 108 + ENDPROC(raw_copy_in_user) 109 + EXPORT_SYMBOL(raw_copy_in_user)
+1 -15
arch/sparc/lib/copy_user.S
··· 364 364 97: 365 365 mov %o2, %g3 366 366 fixupretl: 367 - sethi %hi(PAGE_OFFSET), %g1 368 - cmp %o0, %g1 369 - blu 1f 370 - cmp %o1, %g1 371 - bgeu 1f 372 - ld [%g6 + TI_PREEMPT], %g1 373 - cmp %g1, 0 374 - bne 1f 375 - nop 376 - save %sp, -64, %sp 377 - mov %i0, %o0 378 - call __bzero 379 - mov %g3, %o1 380 - restore 381 - 1: retl 367 + retl 382 368 mov %g3, %o0 383 369 384 370 /* exception routine sets %g2 to (broken_insn - first_insn)>>2 */
+1
arch/tile/include/asm/Kbuild
··· 7 7 generic-y += emergency-restart.h 8 8 generic-y += errno.h 9 9 generic-y += exec.h 10 + generic-y += extable.h 10 11 generic-y += fb.h 11 12 generic-y += fcntl.h 12 13 generic-y += hw_irq.h
+8 -158
arch/tile/include/asm/uaccess.h
··· 18 18 /* 19 19 * User space memory access functions 20 20 */ 21 - #include <linux/sched.h> 22 21 #include <linux/mm.h> 23 22 #include <asm-generic/uaccess-unaligned.h> 24 23 #include <asm/processor.h> 25 24 #include <asm/page.h> 26 - 27 - #define VERIFY_READ 0 28 - #define VERIFY_WRITE 1 29 25 30 26 /* 31 27 * The fs value determines whether argument validity checking should be ··· 98 102 likely(__range_ok((unsigned long)(addr), (size)) == 0); \ 99 103 }) 100 104 101 - /* 102 - * The exception table consists of pairs of addresses: the first is the 103 - * address of an instruction that is allowed to fault, and the second is 104 - * the address at which the program should continue. No registers are 105 - * modified, so it is entirely up to the continuation code to figure out 106 - * what to do. 107 - * 108 - * All the routines below use bits of fixup code that are out of line 109 - * with the main instruction path. This means when everything is well, 110 - * we don't even have to jump over them. Further, they do not intrude 111 - * on our cache or tlb entries. 112 - */ 113 - 114 - struct exception_table_entry { 115 - unsigned long insn, fixup; 116 - }; 117 - 118 - extern int fixup_exception(struct pt_regs *regs); 105 + #include <asm/extable.h> 119 106 120 107 /* 121 108 * This is a type: either unsigned long, if the argument fits into ··· 313 334 ((x) = 0, -EFAULT); \ 314 335 }) 315 336 316 - /** 317 - * __copy_to_user() - copy data into user space, with less checking. 318 - * @to: Destination address, in user space. 319 - * @from: Source address, in kernel space. 320 - * @n: Number of bytes to copy. 321 - * 322 - * Context: User context only. This function may sleep if pagefaults are 323 - * enabled. 324 - * 325 - * Copy data from kernel space to user space. Caller must check 326 - * the specified block with access_ok() before calling this function. 327 - * 328 - * Returns number of bytes that could not be copied. 329 - * On success, this will be zero. 
330 - * 331 - * An alternate version - __copy_to_user_inatomic() - is designed 332 - * to be called from atomic context, typically bracketed by calls 333 - * to pagefault_disable() and pagefault_enable(). 334 - */ 335 - extern unsigned long __must_check __copy_to_user_inatomic( 336 - void __user *to, const void *from, unsigned long n); 337 - 338 - static inline unsigned long __must_check 339 - __copy_to_user(void __user *to, const void *from, unsigned long n) 340 - { 341 - might_fault(); 342 - return __copy_to_user_inatomic(to, from, n); 343 - } 344 - 345 - static inline unsigned long __must_check 346 - copy_to_user(void __user *to, const void *from, unsigned long n) 347 - { 348 - if (access_ok(VERIFY_WRITE, to, n)) 349 - n = __copy_to_user(to, from, n); 350 - return n; 351 - } 352 - 353 - /** 354 - * __copy_from_user() - copy data from user space, with less checking. 355 - * @to: Destination address, in kernel space. 356 - * @from: Source address, in user space. 357 - * @n: Number of bytes to copy. 358 - * 359 - * Context: User context only. This function may sleep if pagefaults are 360 - * enabled. 361 - * 362 - * Copy data from user space to kernel space. Caller must check 363 - * the specified block with access_ok() before calling this function. 364 - * 365 - * Returns number of bytes that could not be copied. 366 - * On success, this will be zero. 367 - * 368 - * If some data could not be copied, this function will pad the copied 369 - * data to the requested size using zero bytes. 370 - * 371 - * An alternate version - __copy_from_user_inatomic() - is designed 372 - * to be called from atomic context, typically bracketed by calls 373 - * to pagefault_disable() and pagefault_enable(). This version 374 - * does *NOT* pad with zeros. 
375 - */ 376 - extern unsigned long __must_check __copy_from_user_inatomic( 377 - void *to, const void __user *from, unsigned long n); 378 - extern unsigned long __must_check __copy_from_user_zeroing( 379 - void *to, const void __user *from, unsigned long n); 380 - 381 - static inline unsigned long __must_check 382 - __copy_from_user(void *to, const void __user *from, unsigned long n) 383 - { 384 - might_fault(); 385 - return __copy_from_user_zeroing(to, from, n); 386 - } 387 - 388 - static inline unsigned long __must_check 389 - _copy_from_user(void *to, const void __user *from, unsigned long n) 390 - { 391 - if (access_ok(VERIFY_READ, from, n)) 392 - n = __copy_from_user(to, from, n); 393 - else 394 - memset(to, 0, n); 395 - return n; 396 - } 397 - 398 - extern void __compiletime_error("usercopy buffer size is too small") 399 - __bad_copy_user(void); 400 - 401 - static inline void copy_user_overflow(int size, unsigned long count) 402 - { 403 - WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count); 404 - } 405 - 406 - static inline unsigned long __must_check copy_from_user(void *to, 407 - const void __user *from, 408 - unsigned long n) 409 - { 410 - int sz = __compiletime_object_size(to); 411 - 412 - if (likely(sz == -1 || sz >= n)) 413 - n = _copy_from_user(to, from, n); 414 - else if (!__builtin_constant_p(n)) 415 - copy_user_overflow(sz, n); 416 - else 417 - __bad_copy_user(); 418 - 419 - return n; 420 - } 337 + extern unsigned long __must_check 338 + raw_copy_to_user(void __user *to, const void *from, unsigned long n); 339 + extern unsigned long __must_check 340 + raw_copy_from_user(void *to, const void __user *from, unsigned long n); 341 + #define INLINE_COPY_FROM_USER 342 + #define INLINE_COPY_TO_USER 421 343 422 344 #ifdef __tilegx__ 423 - /** 424 - * __copy_in_user() - copy data within user space, with less checking. 425 - * @to: Destination address, in user space. 426 - * @from: Source address, in user space. 427 - * @n: Number of bytes to copy. 
428 - * 429 - * Context: User context only. This function may sleep if pagefaults are 430 - * enabled. 431 - * 432 - * Copy data from user space to user space. Caller must check 433 - * the specified blocks with access_ok() before calling this function. 434 - * 435 - * Returns number of bytes that could not be copied. 436 - * On success, this will be zero. 437 - */ 438 - extern unsigned long __copy_in_user_inatomic( 345 + extern unsigned long raw_copy_in_user( 439 346 void __user *to, const void __user *from, unsigned long n); 440 - 441 - static inline unsigned long __must_check 442 - __copy_in_user(void __user *to, const void __user *from, unsigned long n) 443 - { 444 - might_fault(); 445 - return __copy_in_user_inatomic(to, from, n); 446 - } 447 - 448 - static inline unsigned long __must_check 449 - copy_in_user(void __user *to, const void __user *from, unsigned long n) 450 - { 451 - if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n)) 452 - n = __copy_in_user(to, from, n); 453 - return n; 454 - } 455 347 #endif 456 348 457 349
+3 -4
arch/tile/lib/exports.c
··· 38 38 39 39 /* arch/tile/lib/, various memcpy files */ 40 40 EXPORT_SYMBOL(memcpy); 41 - EXPORT_SYMBOL(__copy_to_user_inatomic); 42 - EXPORT_SYMBOL(__copy_from_user_inatomic); 43 - EXPORT_SYMBOL(__copy_from_user_zeroing); 41 + EXPORT_SYMBOL(raw_copy_to_user); 42 + EXPORT_SYMBOL(raw_copy_from_user); 44 43 #ifdef __tilegx__ 45 - EXPORT_SYMBOL(__copy_in_user_inatomic); 44 + EXPORT_SYMBOL(raw_copy_in_user); 46 45 #endif 47 46 48 47 /* hypervisor glue */
+13 -28
arch/tile/lib/memcpy_32.S
··· 24 24 25 25 #define IS_MEMCPY 0 26 26 #define IS_COPY_FROM_USER 1 27 - #define IS_COPY_FROM_USER_ZEROING 2 28 27 #define IS_COPY_TO_USER -1 29 28 30 29 .section .text.memcpy_common, "ax" ··· 41 42 9 42 43 43 44 44 - /* __copy_from_user_inatomic takes the kernel target address in r0, 45 + /* raw_copy_from_user takes the kernel target address in r0, 45 46 * the user source in r1, and the bytes to copy in r2. 46 47 * It returns the number of uncopiable bytes (hopefully zero) in r0. 47 48 */ 48 - ENTRY(__copy_from_user_inatomic) 49 - .type __copy_from_user_inatomic, @function 50 - FEEDBACK_ENTER_EXPLICIT(__copy_from_user_inatomic, \ 49 + ENTRY(raw_copy_from_user) 50 + .type raw_copy_from_user, @function 51 + FEEDBACK_ENTER_EXPLICIT(raw_copy_from_user, \ 51 52 .text.memcpy_common, \ 52 - .Lend_memcpy_common - __copy_from_user_inatomic) 53 + .Lend_memcpy_common - raw_copy_from_user) 53 54 { movei r29, IS_COPY_FROM_USER; j memcpy_common } 54 - .size __copy_from_user_inatomic, . - __copy_from_user_inatomic 55 + .size raw_copy_from_user, . - raw_copy_from_user 55 56 56 - /* __copy_from_user_zeroing is like __copy_from_user_inatomic, but 57 - * any uncopiable bytes are zeroed in the target. 58 - */ 59 - ENTRY(__copy_from_user_zeroing) 60 - .type __copy_from_user_zeroing, @function 61 - FEEDBACK_REENTER(__copy_from_user_inatomic) 62 - { movei r29, IS_COPY_FROM_USER_ZEROING; j memcpy_common } 63 - .size __copy_from_user_zeroing, . - __copy_from_user_zeroing 64 - 65 - /* __copy_to_user_inatomic takes the user target address in r0, 57 + /* raw_copy_to_user takes the user target address in r0, 66 58 * the kernel source in r1, and the bytes to copy in r2. 67 59 * It returns the number of uncopiable bytes (hopefully zero) in r0. 
68 60 */ 69 - ENTRY(__copy_to_user_inatomic) 70 - .type __copy_to_user_inatomic, @function 71 - FEEDBACK_REENTER(__copy_from_user_inatomic) 61 + ENTRY(raw_copy_to_user) 62 + .type raw_copy_to_user, @function 63 + FEEDBACK_REENTER(raw_copy_from_user) 72 64 { movei r29, IS_COPY_TO_USER; j memcpy_common } 73 - .size __copy_to_user_inatomic, . - __copy_to_user_inatomic 65 + .size raw_copy_to_user, . - raw_copy_to_user 74 66 75 67 ENTRY(memcpy) 76 68 .type memcpy, @function 77 - FEEDBACK_REENTER(__copy_from_user_inatomic) 69 + FEEDBACK_REENTER(raw_copy_from_user) 78 70 { movei r29, IS_MEMCPY } 79 71 .size memcpy, . - memcpy 80 72 /* Fall through */ ··· 510 520 { bnzt r2, copy_from_user_fixup_loop } 511 521 512 522 .Lcopy_from_user_fixup_zero_remainder: 513 - { bbs r29, 2f } /* low bit set means IS_COPY_FROM_USER */ 514 - /* byte-at-a-time loop faulted, so zero the rest. */ 515 - { move r3, r2; bz r2, 2f /* should be impossible, but handle it. */ } 516 - 1: { sb r0, zero; addi r0, r0, 1; addi r3, r3, -1 } 517 - { bnzt r3, 1b } 518 - 2: move lr, r27 523 + move lr, r27 519 524 { move r0, r2; jrp lr } 520 525 521 526 copy_to_user_fixup_loop:
+3 -12
arch/tile/lib/memcpy_user_64.c
··· 51 51 __v; \ 52 52 }) 53 53 54 - #define USERCOPY_FUNC __copy_to_user_inatomic 54 + #define USERCOPY_FUNC raw_copy_to_user 55 55 #define ST1(p, v) _ST((p), st1, (v)) 56 56 #define ST2(p, v) _ST((p), st2, (v)) 57 57 #define ST4(p, v) _ST((p), st4, (v)) ··· 62 62 #define LD8 LD 63 63 #include "memcpy_64.c" 64 64 65 - #define USERCOPY_FUNC __copy_from_user_inatomic 65 + #define USERCOPY_FUNC raw_copy_from_user 66 66 #define ST1 ST 67 67 #define ST2 ST 68 68 #define ST4 ST ··· 73 73 #define LD8(p) _LD((p), ld) 74 74 #include "memcpy_64.c" 75 75 76 - #define USERCOPY_FUNC __copy_in_user_inatomic 76 + #define USERCOPY_FUNC raw_copy_in_user 77 77 #define ST1(p, v) _ST((p), st1, (v)) 78 78 #define ST2(p, v) _ST((p), st2, (v)) 79 79 #define ST4(p, v) _ST((p), st4, (v)) ··· 83 83 #define LD4(p) _LD((p), ld4u) 84 84 #define LD8(p) _LD((p), ld) 85 85 #include "memcpy_64.c" 86 - 87 - unsigned long __copy_from_user_zeroing(void *to, const void __user *from, 88 - unsigned long n) 89 - { 90 - unsigned long rc = __copy_from_user_inatomic(to, from, n); 91 - if (unlikely(rc)) 92 - memset(to + n - rc, 0, rc); 93 - return rc; 94 - }
+1
arch/um/include/asm/Kbuild
··· 6 6 generic-y += device.h 7 7 generic-y += emergency-restart.h 8 8 generic-y += exec.h 9 + generic-y += extable.h 9 10 generic-y += ftrace.h 10 11 generic-y += futex.h 11 12 generic-y += hardirq.h
+5 -8
arch/um/include/asm/uaccess.h
··· 7 7 #ifndef __UM_UACCESS_H 8 8 #define __UM_UACCESS_H 9 9 10 - #include <asm/thread_info.h> 11 10 #include <asm/elf.h> 12 11 13 12 #define __under_task_size(addr, size) \ ··· 21 22 #define __addr_range_nowrap(addr, size) \ 22 23 ((unsigned long) (addr) <= ((unsigned long) (addr) + (size))) 23 24 24 - extern long __copy_from_user(void *to, const void __user *from, unsigned long n); 25 - extern long __copy_to_user(void __user *to, const void *from, unsigned long n); 25 + extern unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n); 26 + extern unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n); 26 27 extern long __strncpy_from_user(char *dst, const char __user *src, long count); 27 28 extern long __strnlen_user(const void __user *str, long len); 28 29 extern unsigned long __clear_user(void __user *mem, unsigned long len); ··· 31 32 /* Teach asm-generic/uaccess.h that we have C functions for these. */ 32 33 #define __access_ok __access_ok 33 34 #define __clear_user __clear_user 34 - #define __copy_to_user __copy_to_user 35 - #define __copy_from_user __copy_from_user 36 35 #define __strnlen_user __strnlen_user 37 36 #define __strncpy_from_user __strncpy_from_user 38 - #define __copy_to_user_inatomic __copy_to_user 39 - #define __copy_from_user_inatomic __copy_from_user 37 + #define INLINE_COPY_FROM_USER 38 + #define INLINE_COPY_TO_USER 40 39 41 40 #include <asm-generic/uaccess.h> 42 41 ··· 43 46 return __addr_range_nowrap(addr, size) && 44 47 (__under_task_size(addr, size) || 45 48 __access_ok_vsyscall(addr, size) || 46 - segment_eq(get_fs(), KERNEL_DS)); 49 + uaccess_kernel()); 47 50 } 48 51 49 52 #endif
+9 -9
arch/um/kernel/skas/uaccess.c
··· 139 139 return 0; 140 140 } 141 141 142 - long __copy_from_user(void *to, const void __user *from, unsigned long n) 142 + unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) 143 143 { 144 - if (segment_eq(get_fs(), KERNEL_DS)) { 144 + if (uaccess_kernel()) { 145 145 memcpy(to, (__force void*)from, n); 146 146 return 0; 147 147 } 148 148 149 149 return buffer_op((unsigned long) from, n, 0, copy_chunk_from_user, &to); 150 150 } 151 - EXPORT_SYMBOL(__copy_from_user); 151 + EXPORT_SYMBOL(raw_copy_from_user); 152 152 153 153 static int copy_chunk_to_user(unsigned long to, int len, void *arg) 154 154 { ··· 159 159 return 0; 160 160 } 161 161 162 - long __copy_to_user(void __user *to, const void *from, unsigned long n) 162 + unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n) 163 163 { 164 - if (segment_eq(get_fs(), KERNEL_DS)) { 164 + if (uaccess_kernel()) { 165 165 memcpy((__force void *) to, from, n); 166 166 return 0; 167 167 } 168 168 169 169 return buffer_op((unsigned long) to, n, 1, copy_chunk_to_user, &from); 170 170 } 171 - EXPORT_SYMBOL(__copy_to_user); 171 + EXPORT_SYMBOL(raw_copy_to_user); 172 172 173 173 static int strncpy_chunk_from_user(unsigned long from, int len, void *arg) 174 174 { ··· 189 189 long n; 190 190 char *ptr = dst; 191 191 192 - if (segment_eq(get_fs(), KERNEL_DS)) { 192 + if (uaccess_kernel()) { 193 193 strncpy(dst, (__force void *) src, count); 194 194 return strnlen(dst, count); 195 195 } ··· 210 210 211 211 unsigned long __clear_user(void __user *mem, unsigned long len) 212 212 { 213 - if (segment_eq(get_fs(), KERNEL_DS)) { 213 + if (uaccess_kernel()) { 214 214 memset((__force void*)mem, 0, len); 215 215 return 0; 216 216 } ··· 235 235 { 236 236 int count = 0, n; 237 237 238 - if (segment_eq(get_fs(), KERNEL_DS)) 238 + if (uaccess_kernel()) 239 239 return strnlen((__force char*)str, len) + 1; 240 240 241 241 n = buffer_op((unsigned long) str, len, 0, strnlen_chunk, 
&count);
+1
arch/unicore32/include/asm/Kbuild
··· 10 10 generic-y += emergency-restart.h 11 11 generic-y += errno.h 12 12 generic-y += exec.h 13 + generic-y += extable.h 13 14 generic-y += fb.h 14 15 generic-y += fcntl.h 15 16 generic-y += ftrace.h
+5 -10
arch/unicore32/include/asm/uaccess.h
··· 12 12 #ifndef __UNICORE_UACCESS_H__ 13 13 #define __UNICORE_UACCESS_H__ 14 14 15 - #include <linux/thread_info.h> 16 - #include <linux/errno.h> 17 - 18 15 #include <asm/memory.h> 19 16 20 - #define __copy_from_user __copy_from_user 21 - #define __copy_to_user __copy_to_user 22 17 #define __strncpy_from_user __strncpy_from_user 23 18 #define __strnlen_user __strnlen_user 24 19 #define __clear_user __clear_user 25 20 26 - #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) 21 + #define __kernel_ok (uaccess_kernel()) 27 22 #define __user_ok(addr, size) (((size) <= TASK_SIZE) \ 28 23 && ((addr) <= TASK_SIZE - (size))) 29 24 #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) 30 25 31 26 extern unsigned long __must_check 32 - __copy_from_user(void *to, const void __user *from, unsigned long n); 27 + raw_copy_from_user(void *to, const void __user *from, unsigned long n); 33 28 extern unsigned long __must_check 34 - __copy_to_user(void __user *to, const void *from, unsigned long n); 29 + raw_copy_to_user(void __user *to, const void *from, unsigned long n); 35 30 extern unsigned long __must_check 36 31 __clear_user(void __user *addr, unsigned long n); 37 32 extern unsigned long __must_check 38 33 __strncpy_from_user(char *to, const char __user *from, unsigned long count); 39 34 extern unsigned long 40 35 __strnlen_user(const char __user *s, long n); 36 + #define INLINE_COPY_FROM_USER 37 + #define INLINE_COPY_TO_USER 41 38 42 39 #include <asm-generic/uaccess.h> 43 - 44 - extern int fixup_exception(struct pt_regs *regs); 45 40 46 41 #endif /* __UNICORE_UACCESS_H__ */
+2 -2
arch/unicore32/kernel/ksyms.c
··· 46 46 47 47 EXPORT_SYMBOL(copy_page); 48 48 49 - EXPORT_SYMBOL(__copy_from_user); 50 - EXPORT_SYMBOL(__copy_to_user); 49 + EXPORT_SYMBOL(raw_copy_from_user); 50 + EXPORT_SYMBOL(raw_copy_to_user); 51 51 EXPORT_SYMBOL(__clear_user); 52 52 53 53 EXPORT_SYMBOL(__ashldi3);
+1 -1
arch/unicore32/kernel/process.c
··· 178 178 buf, interrupts_enabled(regs) ? "n" : "ff", 179 179 fast_interrupts_enabled(regs) ? "n" : "ff", 180 180 processor_modes[processor_mode(regs)], 181 - segment_eq(get_fs(), get_ds()) ? "kernel" : "user"); 181 + uaccess_kernel() ? "kernel" : "user"); 182 182 { 183 183 unsigned int ctrl; 184 184
+6 -10
arch/unicore32/lib/copy_from_user.S
··· 16 16 /* 17 17 * Prototype: 18 18 * 19 - * size_t __copy_from_user(void *to, const void *from, size_t n) 19 + * size_t raw_copy_from_user(void *to, const void *from, size_t n) 20 20 * 21 21 * Purpose: 22 22 * ··· 87 87 88 88 .text 89 89 90 - ENTRY(__copy_from_user) 90 + ENTRY(raw_copy_from_user) 91 91 92 92 #include "copy_template.S" 93 93 94 - ENDPROC(__copy_from_user) 94 + ENDPROC(raw_copy_from_user) 95 95 96 96 .pushsection .fixup,"ax" 97 97 .align 0 98 98 copy_abort_preamble 99 - ldm.w (r1, r2), [sp]+ 100 - sub r3, r0, r1 101 - rsub r2, r3, r2 102 - stw r2, [sp] 103 - mov r1, #0 104 - b.l memset 105 - ldw.w r0, [sp]+, #4 99 + ldm.w (r1, r2, r3), [sp]+ 100 + sub r0, r0, r1 101 + rsub r0, r0, r2 106 102 copy_abort_end 107 103 .popsection 108 104
+3 -3
arch/unicore32/lib/copy_to_user.S
··· 16 16 /* 17 17 * Prototype: 18 18 * 19 - * size_t __copy_to_user(void *to, const void *from, size_t n) 19 + * size_t raw_copy_to_user(void *to, const void *from, size_t n) 20 20 * 21 21 * Purpose: 22 22 * ··· 79 79 80 80 .text 81 81 82 - WEAK(__copy_to_user) 82 + WEAK(raw_copy_to_user) 83 83 84 84 #include "copy_template.S" 85 85 86 - ENDPROC(__copy_to_user) 86 + ENDPROC(raw_copy_to_user) 87 87 88 88 .pushsection .fixup,"ax" 89 89 .align 0
-1
arch/x86/Kconfig
··· 98 98 select HAVE_ACPI_APEI_NMI if ACPI 99 99 select HAVE_ALIGNED_STRUCT_PAGE if SLUB 100 100 select HAVE_ARCH_AUDITSYSCALL 101 - select HAVE_ARCH_HARDENED_USERCOPY 102 101 select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE 103 102 select HAVE_ARCH_JUMP_LABEL 104 103 select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP
+12 -58
arch/x86/include/asm/uaccess.h
··· 3 3 /* 4 4 * User space memory access functions 5 5 */ 6 - #include <linux/errno.h> 7 6 #include <linux/compiler.h> 8 7 #include <linux/kasan-checks.h> 9 - #include <linux/thread_info.h> 10 8 #include <linux/string.h> 11 9 #include <asm/asm.h> 12 10 #include <asm/page.h> 13 11 #include <asm/smap.h> 14 12 #include <asm/extable.h> 15 - 16 - #define VERIFY_READ 0 17 - #define VERIFY_WRITE 1 18 13 19 14 /* 20 15 * The fs value determines whether argument validity checking should be ··· 379 384 : "=r" (err), ltype(x) \ 380 385 : "m" (__m(addr)), "i" (errret), "0" (err)) 381 386 387 + #define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret) \ 388 + asm volatile("\n" \ 389 + "1: mov"itype" %2,%"rtype"1\n" \ 390 + "2:\n" \ 391 + ".section .fixup,\"ax\"\n" \ 392 + "3: mov %3,%0\n" \ 393 + " jmp 2b\n" \ 394 + ".previous\n" \ 395 + _ASM_EXTABLE(1b, 3b) \ 396 + : "=r" (err), ltype(x) \ 397 + : "m" (__m(addr)), "i" (errret), "0" (err)) 398 + 382 399 /* 383 400 * This doesn't do __uaccess_begin/end - the exception handling 384 401 * around it must do that. 
··· 681 674 #else 682 675 # include <asm/uaccess_64.h> 683 676 #endif 684 - 685 - unsigned long __must_check _copy_from_user(void *to, const void __user *from, 686 - unsigned n); 687 - unsigned long __must_check _copy_to_user(void __user *to, const void *from, 688 - unsigned n); 689 - 690 - extern void __compiletime_error("usercopy buffer size is too small") 691 - __bad_copy_user(void); 692 - 693 - static inline void copy_user_overflow(int size, unsigned long count) 694 - { 695 - WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count); 696 - } 697 - 698 - static __always_inline unsigned long __must_check 699 - copy_from_user(void *to, const void __user *from, unsigned long n) 700 - { 701 - int sz = __compiletime_object_size(to); 702 - 703 - might_fault(); 704 - 705 - kasan_check_write(to, n); 706 - 707 - if (likely(sz < 0 || sz >= n)) { 708 - check_object_size(to, n, false); 709 - n = _copy_from_user(to, from, n); 710 - } else if (!__builtin_constant_p(n)) 711 - copy_user_overflow(sz, n); 712 - else 713 - __bad_copy_user(); 714 - 715 - return n; 716 - } 717 - 718 - static __always_inline unsigned long __must_check 719 - copy_to_user(void __user *to, const void *from, unsigned long n) 720 - { 721 - int sz = __compiletime_object_size(from); 722 - 723 - kasan_check_read(from, n); 724 - 725 - might_fault(); 726 - 727 - if (likely(sz < 0 || sz >= n)) { 728 - check_object_size(from, n, true); 729 - n = _copy_to_user(to, from, n); 730 - } else if (!__builtin_constant_p(n)) 731 - copy_user_overflow(sz, n); 732 - else 733 - __bad_copy_user(); 734 - 735 - return n; 736 - } 737 677 738 678 /* 739 679 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
+15 -112
arch/x86/include/asm/uaccess_32.h
··· 4 4 /* 5 5 * User space memory access functions 6 6 */ 7 - #include <linux/errno.h> 8 - #include <linux/thread_info.h> 9 7 #include <linux/string.h> 10 8 #include <asm/asm.h> 11 9 #include <asm/page.h> 12 10 13 - unsigned long __must_check __copy_to_user_ll 14 - (void __user *to, const void *from, unsigned long n); 15 - unsigned long __must_check __copy_from_user_ll 16 - (void *to, const void __user *from, unsigned long n); 17 - unsigned long __must_check __copy_from_user_ll_nozero 18 - (void *to, const void __user *from, unsigned long n); 19 - unsigned long __must_check __copy_from_user_ll_nocache 20 - (void *to, const void __user *from, unsigned long n); 11 + unsigned long __must_check __copy_user_ll 12 + (void *to, const void *from, unsigned long n); 21 13 unsigned long __must_check __copy_from_user_ll_nocache_nozero 22 14 (void *to, const void __user *from, unsigned long n); 23 15 24 - /** 25 - * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking. 26 - * @to: Destination address, in user space. 27 - * @from: Source address, in kernel space. 28 - * @n: Number of bytes to copy. 29 - * 30 - * Context: User context only. 31 - * 32 - * Copy data from kernel space to user space. Caller must check 33 - * the specified block with access_ok() before calling this function. 34 - * The caller should also make sure he pins the user space address 35 - * so that we don't result in page fault and sleep. 36 - */ 37 16 static __always_inline unsigned long __must_check 38 - __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) 17 + raw_copy_to_user(void __user *to, const void *from, unsigned long n) 39 18 { 40 - check_object_size(from, n, true); 41 - return __copy_to_user_ll(to, from, n); 42 - } 43 - 44 - /** 45 - * __copy_to_user: - Copy a block of data into user space, with less checking. 46 - * @to: Destination address, in user space. 47 - * @from: Source address, in kernel space. 48 - * @n: Number of bytes to copy. 
49 - * 50 - * Context: User context only. This function may sleep if pagefaults are 51 - * enabled. 52 - * 53 - * Copy data from kernel space to user space. Caller must check 54 - * the specified block with access_ok() before calling this function. 55 - * 56 - * Returns number of bytes that could not be copied. 57 - * On success, this will be zero. 58 - */ 59 - static __always_inline unsigned long __must_check 60 - __copy_to_user(void __user *to, const void *from, unsigned long n) 61 - { 62 - might_fault(); 63 - return __copy_to_user_inatomic(to, from, n); 19 + return __copy_user_ll((__force void *)to, from, n); 64 20 } 65 21 66 22 static __always_inline unsigned long 67 - __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) 23 + raw_copy_from_user(void *to, const void __user *from, unsigned long n) 68 24 { 69 - return __copy_from_user_ll_nozero(to, from, n); 70 - } 71 - 72 - /** 73 - * __copy_from_user: - Copy a block of data from user space, with less checking. 74 - * @to: Destination address, in kernel space. 75 - * @from: Source address, in user space. 76 - * @n: Number of bytes to copy. 77 - * 78 - * Context: User context only. This function may sleep if pagefaults are 79 - * enabled. 80 - * 81 - * Copy data from user space to kernel space. Caller must check 82 - * the specified block with access_ok() before calling this function. 83 - * 84 - * Returns number of bytes that could not be copied. 85 - * On success, this will be zero. 86 - * 87 - * If some data could not be copied, this function will pad the copied 88 - * data to the requested size using zero bytes. 89 - * 90 - * An alternate version - __copy_from_user_inatomic() - may be called from 91 - * atomic context and will fail rather than sleep. In this case the 92 - * uncopied bytes will *NOT* be padded with zeros. See fs/filemap.h 93 - * for explanation of why this is needed. 
94 - */ 95 - static __always_inline unsigned long 96 - __copy_from_user(void *to, const void __user *from, unsigned long n) 97 - { 98 - might_fault(); 99 - check_object_size(to, n, false); 100 25 if (__builtin_constant_p(n)) { 101 26 unsigned long ret; 102 27 103 28 switch (n) { 104 29 case 1: 30 + ret = 0; 105 31 __uaccess_begin(); 106 - __get_user_size(*(u8 *)to, from, 1, ret, 1); 32 + __get_user_asm_nozero(*(u8 *)to, from, ret, 33 + "b", "b", "=q", 1); 107 34 __uaccess_end(); 108 35 return ret; 109 36 case 2: 37 + ret = 0; 110 38 __uaccess_begin(); 111 - __get_user_size(*(u16 *)to, from, 2, ret, 2); 39 + __get_user_asm_nozero(*(u16 *)to, from, ret, 40 + "w", "w", "=r", 2); 112 41 __uaccess_end(); 113 42 return ret; 114 43 case 4: 44 + ret = 0; 115 45 __uaccess_begin(); 116 - __get_user_size(*(u32 *)to, from, 4, ret, 4); 46 + __get_user_asm_nozero(*(u32 *)to, from, ret, 47 + "l", "k", "=r", 4); 117 48 __uaccess_end(); 118 49 return ret; 119 50 } 120 51 } 121 - return __copy_from_user_ll(to, from, n); 122 - } 123 - 124 - static __always_inline unsigned long __copy_from_user_nocache(void *to, 125 - const void __user *from, unsigned long n) 126 - { 127 - might_fault(); 128 - if (__builtin_constant_p(n)) { 129 - unsigned long ret; 130 - 131 - switch (n) { 132 - case 1: 133 - __uaccess_begin(); 134 - __get_user_size(*(u8 *)to, from, 1, ret, 1); 135 - __uaccess_end(); 136 - return ret; 137 - case 2: 138 - __uaccess_begin(); 139 - __get_user_size(*(u16 *)to, from, 2, ret, 2); 140 - __uaccess_end(); 141 - return ret; 142 - case 4: 143 - __uaccess_begin(); 144 - __get_user_size(*(u32 *)to, from, 4, ret, 4); 145 - __uaccess_end(); 146 - return ret; 147 - } 148 - } 149 - return __copy_from_user_ll_nocache(to, from, n); 52 + return __copy_user_ll(to, (__force const void *)from, n); 150 53 } 151 54 152 55 static __always_inline unsigned long
+15 -113
arch/x86/include/asm/uaccess_64.h
··· 5 5 * User space memory access functions 6 6 */ 7 7 #include <linux/compiler.h> 8 - #include <linux/errno.h> 9 8 #include <linux/lockdep.h> 10 9 #include <linux/kasan-checks.h> 11 10 #include <asm/alternative.h> ··· 45 46 return ret; 46 47 } 47 48 48 - __must_check unsigned long 49 - copy_in_user(void __user *to, const void __user *from, unsigned len); 50 - 51 - static __always_inline __must_check 52 - int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size) 49 + static __always_inline __must_check unsigned long 50 + raw_copy_from_user(void *dst, const void __user *src, unsigned long size) 53 51 { 54 52 int ret = 0; 55 53 56 - check_object_size(dst, size, false); 57 54 if (!__builtin_constant_p(size)) 58 55 return copy_user_generic(dst, (__force void *)src, size); 59 56 switch (size) { 60 57 case 1: 61 58 __uaccess_begin(); 62 - __get_user_asm(*(u8 *)dst, (u8 __user *)src, 59 + __get_user_asm_nozero(*(u8 *)dst, (u8 __user *)src, 63 60 ret, "b", "b", "=q", 1); 64 61 __uaccess_end(); 65 62 return ret; 66 63 case 2: 67 64 __uaccess_begin(); 68 - __get_user_asm(*(u16 *)dst, (u16 __user *)src, 65 + __get_user_asm_nozero(*(u16 *)dst, (u16 __user *)src, 69 66 ret, "w", "w", "=r", 2); 70 67 __uaccess_end(); 71 68 return ret; 72 69 case 4: 73 70 __uaccess_begin(); 74 - __get_user_asm(*(u32 *)dst, (u32 __user *)src, 71 + __get_user_asm_nozero(*(u32 *)dst, (u32 __user *)src, 75 72 ret, "l", "k", "=r", 4); 76 73 __uaccess_end(); 77 74 return ret; 78 75 case 8: 79 76 __uaccess_begin(); 80 - __get_user_asm(*(u64 *)dst, (u64 __user *)src, 77 + __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src, 81 78 ret, "q", "", "=r", 8); 82 79 __uaccess_end(); 83 80 return ret; 84 81 case 10: 85 82 __uaccess_begin(); 86 - __get_user_asm(*(u64 *)dst, (u64 __user *)src, 83 + __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src, 87 84 ret, "q", "", "=r", 10); 88 85 if (likely(!ret)) 89 - __get_user_asm(*(u16 *)(8 + (char *)dst), 86 + __get_user_asm_nozero(*(u16 *)(8 + 
(char *)dst), 90 87 (u16 __user *)(8 + (char __user *)src), 91 88 ret, "w", "w", "=r", 2); 92 89 __uaccess_end(); 93 90 return ret; 94 91 case 16: 95 92 __uaccess_begin(); 96 - __get_user_asm(*(u64 *)dst, (u64 __user *)src, 93 + __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src, 97 94 ret, "q", "", "=r", 16); 98 95 if (likely(!ret)) 99 - __get_user_asm(*(u64 *)(8 + (char *)dst), 96 + __get_user_asm_nozero(*(u64 *)(8 + (char *)dst), 100 97 (u64 __user *)(8 + (char __user *)src), 101 98 ret, "q", "", "=r", 8); 102 99 __uaccess_end(); ··· 102 107 } 103 108 } 104 109 105 - static __always_inline __must_check 106 - int __copy_from_user(void *dst, const void __user *src, unsigned size) 107 - { 108 - might_fault(); 109 - kasan_check_write(dst, size); 110 - return __copy_from_user_nocheck(dst, src, size); 111 - } 112 - 113 - static __always_inline __must_check 114 - int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size) 110 + static __always_inline __must_check unsigned long 111 + raw_copy_to_user(void __user *dst, const void *src, unsigned long size) 115 112 { 116 113 int ret = 0; 117 114 118 - check_object_size(src, size, true); 119 115 if (!__builtin_constant_p(size)) 120 116 return copy_user_generic((__force void *)dst, src, size); 121 117 switch (size) { ··· 162 176 } 163 177 164 178 static __always_inline __must_check 165 - int __copy_to_user(void __user *dst, const void *src, unsigned size) 179 + unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long size) 166 180 { 167 - might_fault(); 168 - kasan_check_read(src, size); 169 - return __copy_to_user_nocheck(dst, src, size); 170 - } 171 - 172 - static __always_inline __must_check 173 - int __copy_in_user(void __user *dst, const void __user *src, unsigned size) 174 - { 175 - int ret = 0; 176 - 177 - might_fault(); 178 - if (!__builtin_constant_p(size)) 179 - return copy_user_generic((__force void *)dst, 180 - (__force void *)src, size); 181 - switch (size) { 182 
- case 1: { 183 - u8 tmp; 184 - __uaccess_begin(); 185 - __get_user_asm(tmp, (u8 __user *)src, 186 - ret, "b", "b", "=q", 1); 187 - if (likely(!ret)) 188 - __put_user_asm(tmp, (u8 __user *)dst, 189 - ret, "b", "b", "iq", 1); 190 - __uaccess_end(); 191 - return ret; 192 - } 193 - case 2: { 194 - u16 tmp; 195 - __uaccess_begin(); 196 - __get_user_asm(tmp, (u16 __user *)src, 197 - ret, "w", "w", "=r", 2); 198 - if (likely(!ret)) 199 - __put_user_asm(tmp, (u16 __user *)dst, 200 - ret, "w", "w", "ir", 2); 201 - __uaccess_end(); 202 - return ret; 203 - } 204 - 205 - case 4: { 206 - u32 tmp; 207 - __uaccess_begin(); 208 - __get_user_asm(tmp, (u32 __user *)src, 209 - ret, "l", "k", "=r", 4); 210 - if (likely(!ret)) 211 - __put_user_asm(tmp, (u32 __user *)dst, 212 - ret, "l", "k", "ir", 4); 213 - __uaccess_end(); 214 - return ret; 215 - } 216 - case 8: { 217 - u64 tmp; 218 - __uaccess_begin(); 219 - __get_user_asm(tmp, (u64 __user *)src, 220 - ret, "q", "", "=r", 8); 221 - if (likely(!ret)) 222 - __put_user_asm(tmp, (u64 __user *)dst, 223 - ret, "q", "", "er", 8); 224 - __uaccess_end(); 225 - return ret; 226 - } 227 - default: 228 - return copy_user_generic((__force void *)dst, 229 - (__force void *)src, size); 230 - } 231 - } 232 - 233 - static __must_check __always_inline int 234 - __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size) 235 - { 236 - kasan_check_write(dst, size); 237 - return __copy_from_user_nocheck(dst, src, size); 238 - } 239 - 240 - static __must_check __always_inline int 241 - __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) 242 - { 243 - kasan_check_read(src, size); 244 - return __copy_to_user_nocheck(dst, src, size); 181 + return copy_user_generic((__force void *)dst, 182 + (__force void *)src, size); 245 183 } 246 184 247 185 extern long __copy_user_nocache(void *dst, const void __user *src, 248 186 unsigned size, int zerorest); 249 - 250 - static inline int 251 - __copy_from_user_nocache(void *dst, 
const void __user *src, unsigned size) 252 - { 253 - might_fault(); 254 - kasan_check_write(dst, size); 255 - return __copy_user_nocache(dst, src, size, 1); 256 - } 257 187 258 188 static inline int 259 189 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
+1 -53
arch/x86/lib/usercopy.c
··· 4 4 * For licencing details see kernel-base/COPYING 5 5 */ 6 6 7 - #include <linux/highmem.h> 7 + #include <linux/uaccess.h> 8 8 #include <linux/export.h> 9 - 10 - #include <asm/word-at-a-time.h> 11 - #include <linux/sched.h> 12 9 13 10 /* 14 11 * We rely on the nested NMI work to allow atomic faults from the NMI path; the ··· 31 34 return ret; 32 35 } 33 36 EXPORT_SYMBOL_GPL(copy_from_user_nmi); 34 - 35 - /** 36 - * copy_to_user: - Copy a block of data into user space. 37 - * @to: Destination address, in user space. 38 - * @from: Source address, in kernel space. 39 - * @n: Number of bytes to copy. 40 - * 41 - * Context: User context only. This function may sleep if pagefaults are 42 - * enabled. 43 - * 44 - * Copy data from kernel space to user space. 45 - * 46 - * Returns number of bytes that could not be copied. 47 - * On success, this will be zero. 48 - */ 49 - unsigned long _copy_to_user(void __user *to, const void *from, unsigned n) 50 - { 51 - if (access_ok(VERIFY_WRITE, to, n)) 52 - n = __copy_to_user(to, from, n); 53 - return n; 54 - } 55 - EXPORT_SYMBOL(_copy_to_user); 56 - 57 - /** 58 - * copy_from_user: - Copy a block of data from user space. 59 - * @to: Destination address, in kernel space. 60 - * @from: Source address, in user space. 61 - * @n: Number of bytes to copy. 62 - * 63 - * Context: User context only. This function may sleep if pagefaults are 64 - * enabled. 65 - * 66 - * Copy data from user space to kernel space. 67 - * 68 - * Returns number of bytes that could not be copied. 69 - * On success, this will be zero. 70 - * 71 - * If some data could not be copied, this function will pad the copied 72 - * data to the requested size using zero bytes. 73 - */ 74 - unsigned long _copy_from_user(void *to, const void __user *from, unsigned n) 75 - { 76 - if (access_ok(VERIFY_READ, from, n)) 77 - n = __copy_from_user(to, from, n); 78 - else 79 - memset(to, 0, n); 80 - return n; 81 - } 82 - EXPORT_SYMBOL(_copy_from_user);
+2 -286
arch/x86/lib/usercopy_32.c
··· 5 5 * Copyright 1997 Andi Kleen <ak@muc.de> 6 6 * Copyright 1997 Linus Torvalds 7 7 */ 8 - #include <linux/mm.h> 9 - #include <linux/highmem.h> 10 - #include <linux/blkdev.h> 11 8 #include <linux/export.h> 12 - #include <linux/backing-dev.h> 13 - #include <linux/interrupt.h> 14 9 #include <linux/uaccess.h> 15 10 #include <asm/mmx.h> 16 11 #include <asm/asm.h> ··· 196 201 return size; 197 202 } 198 203 199 - static unsigned long 200 - __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) 201 - { 202 - int d0, d1; 203 - __asm__ __volatile__( 204 - " .align 2,0x90\n" 205 - "0: movl 32(%4), %%eax\n" 206 - " cmpl $67, %0\n" 207 - " jbe 2f\n" 208 - "1: movl 64(%4), %%eax\n" 209 - " .align 2,0x90\n" 210 - "2: movl 0(%4), %%eax\n" 211 - "21: movl 4(%4), %%edx\n" 212 - " movl %%eax, 0(%3)\n" 213 - " movl %%edx, 4(%3)\n" 214 - "3: movl 8(%4), %%eax\n" 215 - "31: movl 12(%4),%%edx\n" 216 - " movl %%eax, 8(%3)\n" 217 - " movl %%edx, 12(%3)\n" 218 - "4: movl 16(%4), %%eax\n" 219 - "41: movl 20(%4), %%edx\n" 220 - " movl %%eax, 16(%3)\n" 221 - " movl %%edx, 20(%3)\n" 222 - "10: movl 24(%4), %%eax\n" 223 - "51: movl 28(%4), %%edx\n" 224 - " movl %%eax, 24(%3)\n" 225 - " movl %%edx, 28(%3)\n" 226 - "11: movl 32(%4), %%eax\n" 227 - "61: movl 36(%4), %%edx\n" 228 - " movl %%eax, 32(%3)\n" 229 - " movl %%edx, 36(%3)\n" 230 - "12: movl 40(%4), %%eax\n" 231 - "71: movl 44(%4), %%edx\n" 232 - " movl %%eax, 40(%3)\n" 233 - " movl %%edx, 44(%3)\n" 234 - "13: movl 48(%4), %%eax\n" 235 - "81: movl 52(%4), %%edx\n" 236 - " movl %%eax, 48(%3)\n" 237 - " movl %%edx, 52(%3)\n" 238 - "14: movl 56(%4), %%eax\n" 239 - "91: movl 60(%4), %%edx\n" 240 - " movl %%eax, 56(%3)\n" 241 - " movl %%edx, 60(%3)\n" 242 - " addl $-64, %0\n" 243 - " addl $64, %4\n" 244 - " addl $64, %3\n" 245 - " cmpl $63, %0\n" 246 - " ja 0b\n" 247 - "5: movl %0, %%eax\n" 248 - " shrl $2, %0\n" 249 - " andl $3, %%eax\n" 250 - " cld\n" 251 - "6: rep; movsl\n" 252 - " movl %%eax,%0\n" 253 - "7: 
rep; movsb\n" 254 - "8:\n" 255 - ".section .fixup,\"ax\"\n" 256 - "9: lea 0(%%eax,%0,4),%0\n" 257 - "16: pushl %0\n" 258 - " pushl %%eax\n" 259 - " xorl %%eax,%%eax\n" 260 - " rep; stosb\n" 261 - " popl %%eax\n" 262 - " popl %0\n" 263 - " jmp 8b\n" 264 - ".previous\n" 265 - _ASM_EXTABLE(0b,16b) 266 - _ASM_EXTABLE(1b,16b) 267 - _ASM_EXTABLE(2b,16b) 268 - _ASM_EXTABLE(21b,16b) 269 - _ASM_EXTABLE(3b,16b) 270 - _ASM_EXTABLE(31b,16b) 271 - _ASM_EXTABLE(4b,16b) 272 - _ASM_EXTABLE(41b,16b) 273 - _ASM_EXTABLE(10b,16b) 274 - _ASM_EXTABLE(51b,16b) 275 - _ASM_EXTABLE(11b,16b) 276 - _ASM_EXTABLE(61b,16b) 277 - _ASM_EXTABLE(12b,16b) 278 - _ASM_EXTABLE(71b,16b) 279 - _ASM_EXTABLE(13b,16b) 280 - _ASM_EXTABLE(81b,16b) 281 - _ASM_EXTABLE(14b,16b) 282 - _ASM_EXTABLE(91b,16b) 283 - _ASM_EXTABLE(6b,9b) 284 - _ASM_EXTABLE(7b,16b) 285 - : "=&c"(size), "=&D" (d0), "=&S" (d1) 286 - : "1"(to), "2"(from), "0"(size) 287 - : "eax", "edx", "memory"); 288 - return size; 289 - } 290 - 291 - /* 292 - * Non Temporal Hint version of __copy_user_zeroing_intel. It is cache aware. 
293 - * hyoshiok@miraclelinux.com 294 - */ 295 - 296 - static unsigned long __copy_user_zeroing_intel_nocache(void *to, 297 - const void __user *from, unsigned long size) 298 - { 299 - int d0, d1; 300 - 301 - __asm__ __volatile__( 302 - " .align 2,0x90\n" 303 - "0: movl 32(%4), %%eax\n" 304 - " cmpl $67, %0\n" 305 - " jbe 2f\n" 306 - "1: movl 64(%4), %%eax\n" 307 - " .align 2,0x90\n" 308 - "2: movl 0(%4), %%eax\n" 309 - "21: movl 4(%4), %%edx\n" 310 - " movnti %%eax, 0(%3)\n" 311 - " movnti %%edx, 4(%3)\n" 312 - "3: movl 8(%4), %%eax\n" 313 - "31: movl 12(%4),%%edx\n" 314 - " movnti %%eax, 8(%3)\n" 315 - " movnti %%edx, 12(%3)\n" 316 - "4: movl 16(%4), %%eax\n" 317 - "41: movl 20(%4), %%edx\n" 318 - " movnti %%eax, 16(%3)\n" 319 - " movnti %%edx, 20(%3)\n" 320 - "10: movl 24(%4), %%eax\n" 321 - "51: movl 28(%4), %%edx\n" 322 - " movnti %%eax, 24(%3)\n" 323 - " movnti %%edx, 28(%3)\n" 324 - "11: movl 32(%4), %%eax\n" 325 - "61: movl 36(%4), %%edx\n" 326 - " movnti %%eax, 32(%3)\n" 327 - " movnti %%edx, 36(%3)\n" 328 - "12: movl 40(%4), %%eax\n" 329 - "71: movl 44(%4), %%edx\n" 330 - " movnti %%eax, 40(%3)\n" 331 - " movnti %%edx, 44(%3)\n" 332 - "13: movl 48(%4), %%eax\n" 333 - "81: movl 52(%4), %%edx\n" 334 - " movnti %%eax, 48(%3)\n" 335 - " movnti %%edx, 52(%3)\n" 336 - "14: movl 56(%4), %%eax\n" 337 - "91: movl 60(%4), %%edx\n" 338 - " movnti %%eax, 56(%3)\n" 339 - " movnti %%edx, 60(%3)\n" 340 - " addl $-64, %0\n" 341 - " addl $64, %4\n" 342 - " addl $64, %3\n" 343 - " cmpl $63, %0\n" 344 - " ja 0b\n" 345 - " sfence \n" 346 - "5: movl %0, %%eax\n" 347 - " shrl $2, %0\n" 348 - " andl $3, %%eax\n" 349 - " cld\n" 350 - "6: rep; movsl\n" 351 - " movl %%eax,%0\n" 352 - "7: rep; movsb\n" 353 - "8:\n" 354 - ".section .fixup,\"ax\"\n" 355 - "9: lea 0(%%eax,%0,4),%0\n" 356 - "16: pushl %0\n" 357 - " pushl %%eax\n" 358 - " xorl %%eax,%%eax\n" 359 - " rep; stosb\n" 360 - " popl %%eax\n" 361 - " popl %0\n" 362 - " jmp 8b\n" 363 - ".previous\n" 364 - _ASM_EXTABLE(0b,16b) 
365 - _ASM_EXTABLE(1b,16b) 366 - _ASM_EXTABLE(2b,16b) 367 - _ASM_EXTABLE(21b,16b) 368 - _ASM_EXTABLE(3b,16b) 369 - _ASM_EXTABLE(31b,16b) 370 - _ASM_EXTABLE(4b,16b) 371 - _ASM_EXTABLE(41b,16b) 372 - _ASM_EXTABLE(10b,16b) 373 - _ASM_EXTABLE(51b,16b) 374 - _ASM_EXTABLE(11b,16b) 375 - _ASM_EXTABLE(61b,16b) 376 - _ASM_EXTABLE(12b,16b) 377 - _ASM_EXTABLE(71b,16b) 378 - _ASM_EXTABLE(13b,16b) 379 - _ASM_EXTABLE(81b,16b) 380 - _ASM_EXTABLE(14b,16b) 381 - _ASM_EXTABLE(91b,16b) 382 - _ASM_EXTABLE(6b,9b) 383 - _ASM_EXTABLE(7b,16b) 384 - : "=&c"(size), "=&D" (d0), "=&S" (d1) 385 - : "1"(to), "2"(from), "0"(size) 386 - : "eax", "edx", "memory"); 387 - return size; 388 - } 389 - 390 204 static unsigned long __copy_user_intel_nocache(void *to, 391 205 const void __user *from, unsigned long size) 392 206 { ··· 290 486 * Leave these declared but undefined. They should not be any references to 291 487 * them 292 488 */ 293 - unsigned long __copy_user_zeroing_intel(void *to, const void __user *from, 294 - unsigned long size); 295 489 unsigned long __copy_user_intel(void __user *to, const void *from, 296 490 unsigned long size); 297 - unsigned long __copy_user_zeroing_intel_nocache(void *to, 298 - const void __user *from, unsigned long size); 299 491 #endif /* CONFIG_X86_INTEL_USERCOPY */ 300 492 301 493 /* Generic arbitrary sized copy. 
*/ ··· 328 528 : "memory"); \ 329 529 } while (0) 330 530 331 - #define __copy_user_zeroing(to, from, size) \ 332 - do { \ 333 - int __d0, __d1, __d2; \ 334 - __asm__ __volatile__( \ 335 - " cmp $7,%0\n" \ 336 - " jbe 1f\n" \ 337 - " movl %1,%0\n" \ 338 - " negl %0\n" \ 339 - " andl $7,%0\n" \ 340 - " subl %0,%3\n" \ 341 - "4: rep; movsb\n" \ 342 - " movl %3,%0\n" \ 343 - " shrl $2,%0\n" \ 344 - " andl $3,%3\n" \ 345 - " .align 2,0x90\n" \ 346 - "0: rep; movsl\n" \ 347 - " movl %3,%0\n" \ 348 - "1: rep; movsb\n" \ 349 - "2:\n" \ 350 - ".section .fixup,\"ax\"\n" \ 351 - "5: addl %3,%0\n" \ 352 - " jmp 6f\n" \ 353 - "3: lea 0(%3,%0,4),%0\n" \ 354 - "6: pushl %0\n" \ 355 - " pushl %%eax\n" \ 356 - " xorl %%eax,%%eax\n" \ 357 - " rep; stosb\n" \ 358 - " popl %%eax\n" \ 359 - " popl %0\n" \ 360 - " jmp 2b\n" \ 361 - ".previous\n" \ 362 - _ASM_EXTABLE(4b,5b) \ 363 - _ASM_EXTABLE(0b,3b) \ 364 - _ASM_EXTABLE(1b,6b) \ 365 - : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \ 366 - : "3"(size), "0"(size), "1"(to), "2"(from) \ 367 - : "memory"); \ 368 - } while (0) 369 - 370 - unsigned long __copy_to_user_ll(void __user *to, const void *from, 371 - unsigned long n) 531 + unsigned long __copy_user_ll(void *to, const void *from, unsigned long n) 372 532 { 373 533 stac(); 374 534 if (movsl_is_ok(to, from, n)) ··· 338 578 clac(); 339 579 return n; 340 580 } 341 - EXPORT_SYMBOL(__copy_to_user_ll); 342 - 343 - unsigned long __copy_from_user_ll(void *to, const void __user *from, 344 - unsigned long n) 345 - { 346 - stac(); 347 - if (movsl_is_ok(to, from, n)) 348 - __copy_user_zeroing(to, from, n); 349 - else 350 - n = __copy_user_zeroing_intel(to, from, n); 351 - clac(); 352 - return n; 353 - } 354 - EXPORT_SYMBOL(__copy_from_user_ll); 355 - 356 - unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from, 357 - unsigned long n) 358 - { 359 - stac(); 360 - if (movsl_is_ok(to, from, n)) 361 - __copy_user(to, from, n); 362 - else 363 - n = __copy_user_intel((void 
__user *)to, 364 - (const void *)from, n); 365 - clac(); 366 - return n; 367 - } 368 - EXPORT_SYMBOL(__copy_from_user_ll_nozero); 369 - 370 - unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from, 371 - unsigned long n) 372 - { 373 - stac(); 374 - #ifdef CONFIG_X86_INTEL_USERCOPY 375 - if (n > 64 && static_cpu_has(X86_FEATURE_XMM2)) 376 - n = __copy_user_zeroing_intel_nocache(to, from, n); 377 - else 378 - __copy_user_zeroing(to, from, n); 379 - #else 380 - __copy_user_zeroing(to, from, n); 381 - #endif 382 - clac(); 383 - return n; 384 - } 385 - EXPORT_SYMBOL(__copy_from_user_ll_nocache); 581 + EXPORT_SYMBOL(__copy_user_ll); 386 582 387 583 unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from, 388 584 unsigned long n)
-13
arch/x86/lib/usercopy_64.c
··· 54 54 } 55 55 EXPORT_SYMBOL(clear_user); 56 56 57 - unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len) 58 - { 59 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) { 60 - return copy_user_generic((__force void *)to, (__force void *)from, len); 61 - } 62 - return len; 63 - } 64 - EXPORT_SYMBOL(copy_in_user); 65 - 66 57 /* 67 58 * Try to copy last bytes and clear the rest if needed. 68 59 * Since protection fault in copy_from/to_user is not a normal situation, ··· 71 80 break; 72 81 } 73 82 clac(); 74 - 75 - /* If the destination is a kernel buffer, we always clear the end */ 76 - if (!__addr_ok(to)) 77 - memset(to, 0, len); 78 83 return len; 79 84 }
+1
arch/xtensa/include/asm/Kbuild
··· 6 6 generic-y += emergency-restart.h 7 7 generic-y += errno.h 8 8 generic-y += exec.h 9 + generic-y += extable.h 9 10 generic-y += fcntl.h 10 11 generic-y += hardirq.h 11 12 generic-y += ioctl.h
-3
arch/xtensa/include/asm/asm-uaccess.h
··· 19 19 #include <linux/errno.h> 20 20 #include <asm/types.h> 21 21 22 - #define VERIFY_READ 0 23 - #define VERIFY_WRITE 1 24 - 25 22 #include <asm/current.h> 26 23 #include <asm/asm-offsets.h> 27 24 #include <asm/processor.h>
+12 -61
arch/xtensa/include/asm/uaccess.h
··· 16 16 #ifndef _XTENSA_UACCESS_H 17 17 #define _XTENSA_UACCESS_H 18 18 19 - #include <linux/errno.h> 20 19 #include <linux/prefetch.h> 21 20 #include <asm/types.h> 22 - 23 - #define VERIFY_READ 0 24 - #define VERIFY_WRITE 1 25 - 26 - #include <linux/sched.h> 21 + #include <asm/extable.h> 27 22 28 23 /* 29 24 * The fs value determines whether argument validity checking should ··· 38 43 39 44 #define segment_eq(a, b) ((a).seg == (b).seg) 40 45 41 - #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) 46 + #define __kernel_ok (uaccess_kernel()) 42 47 #define __user_ok(addr, size) \ 43 48 (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size))) 44 49 #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) ··· 234 239 * Copy to/from user space 235 240 */ 236 241 237 - /* 238 - * We use a generic, arbitrary-sized copy subroutine. The Xtensa 239 - * architecture would cause heavy code bloat if we tried to inline 240 - * these functions and provide __constant_copy_* equivalents like the 241 - * i386 versions. __xtensa_copy_user is quite efficient. See the 242 - * .fixup section of __xtensa_copy_user for a discussion on the 243 - * X_zeroing equivalents for Xtensa. 
244 - */ 245 - 246 242 extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n); 247 - #define __copy_user(to, from, size) __xtensa_copy_user(to, from, size) 248 - 249 243 250 244 static inline unsigned long 251 - __generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n) 252 - { 253 - return __copy_user(to, from, n); 254 - } 255 - 256 - static inline unsigned long 257 - __generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n) 258 - { 259 - return __copy_user(to, from, n); 260 - } 261 - 262 - static inline unsigned long 263 - __generic_copy_to_user(void *to, const void *from, unsigned long n) 264 - { 265 - prefetch(from); 266 - if (access_ok(VERIFY_WRITE, to, n)) 267 - return __copy_user(to, from, n); 268 - return n; 269 - } 270 - 271 - static inline unsigned long 272 - __generic_copy_from_user(void *to, const void *from, unsigned long n) 245 + raw_copy_from_user(void *to, const void __user *from, unsigned long n) 273 246 { 274 247 prefetchw(to); 275 - if (access_ok(VERIFY_READ, from, n)) 276 - return __copy_user(to, from, n); 277 - else 278 - memset(to, 0, n); 279 - return n; 248 + return __xtensa_copy_user(to, (__force const void *)from, n); 280 249 } 281 - 282 - #define copy_to_user(to, from, n) __generic_copy_to_user((to), (from), (n)) 283 - #define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n)) 284 - #define __copy_to_user(to, from, n) \ 285 - __generic_copy_to_user_nocheck((to), (from), (n)) 286 - #define __copy_from_user(to, from, n) \ 287 - __generic_copy_from_user_nocheck((to), (from), (n)) 288 - #define __copy_to_user_inatomic __copy_to_user 289 - #define __copy_from_user_inatomic __copy_from_user 290 - 250 + static inline unsigned long 251 + raw_copy_to_user(void __user *to, const void *from, unsigned long n) 252 + { 253 + prefetch(from); 254 + return __xtensa_copy_user((__force void *)to, from, n); 255 + } 256 + #define INLINE_COPY_FROM_USER 257 + #define 
INLINE_COPY_TO_USER 291 258 292 259 /* 293 260 * We need to return the number of bytes not cleared. Our memset() ··· 304 347 return 0; 305 348 return __strnlen_user(str, len); 306 349 } 307 - 308 - 309 - struct exception_table_entry 310 - { 311 - unsigned long insn, fixup; 312 - }; 313 350 314 351 #endif /* _XTENSA_UACCESS_H */
+48 -68
arch/xtensa/lib/usercopy.S
··· 102 102 bltui a4, 7, .Lbytecopy # do short copies byte by byte 103 103 104 104 # copy 1 byte 105 - EX(l8ui, a6, a3, 0, l_fixup) 105 + EX(l8ui, a6, a3, 0, fixup) 106 106 addi a3, a3, 1 107 - EX(s8i, a6, a5, 0, s_fixup) 107 + EX(s8i, a6, a5, 0, fixup) 108 108 addi a5, a5, 1 109 109 addi a4, a4, -1 110 110 bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then ··· 112 112 .Ldst2mod4: # dst 16-bit aligned 113 113 # copy 2 bytes 114 114 bltui a4, 6, .Lbytecopy # do short copies byte by byte 115 - EX(l8ui, a6, a3, 0, l_fixup) 116 - EX(l8ui, a7, a3, 1, l_fixup) 115 + EX(l8ui, a6, a3, 0, fixup) 116 + EX(l8ui, a7, a3, 1, fixup) 117 117 addi a3, a3, 2 118 - EX(s8i, a6, a5, 0, s_fixup) 119 - EX(s8i, a7, a5, 1, s_fixup) 118 + EX(s8i, a6, a5, 0, fixup) 119 + EX(s8i, a7, a5, 1, fixup) 120 120 addi a5, a5, 2 121 121 addi a4, a4, -2 122 122 j .Ldstaligned # dst is now aligned, return to main algorithm ··· 135 135 add a7, a3, a4 # a7 = end address for source 136 136 #endif /* !XCHAL_HAVE_LOOPS */ 137 137 .Lnextbyte: 138 - EX(l8ui, a6, a3, 0, l_fixup) 138 + EX(l8ui, a6, a3, 0, fixup) 139 139 addi a3, a3, 1 140 - EX(s8i, a6, a5, 0, s_fixup) 140 + EX(s8i, a6, a5, 0, fixup) 141 141 addi a5, a5, 1 142 142 #if !XCHAL_HAVE_LOOPS 143 143 blt a3, a7, .Lnextbyte ··· 161 161 add a8, a8, a3 # a8 = end of last 16B source chunk 162 162 #endif /* !XCHAL_HAVE_LOOPS */ 163 163 .Loop1: 164 - EX(l32i, a6, a3, 0, l_fixup) 165 - EX(l32i, a7, a3, 4, l_fixup) 166 - EX(s32i, a6, a5, 0, s_fixup) 167 - EX(l32i, a6, a3, 8, l_fixup) 168 - EX(s32i, a7, a5, 4, s_fixup) 169 - EX(l32i, a7, a3, 12, l_fixup) 170 - EX(s32i, a6, a5, 8, s_fixup) 164 + EX(l32i, a6, a3, 0, fixup) 165 + EX(l32i, a7, a3, 4, fixup) 166 + EX(s32i, a6, a5, 0, fixup) 167 + EX(l32i, a6, a3, 8, fixup) 168 + EX(s32i, a7, a5, 4, fixup) 169 + EX(l32i, a7, a3, 12, fixup) 170 + EX(s32i, a6, a5, 8, fixup) 171 171 addi a3, a3, 16 172 - EX(s32i, a7, a5, 12, s_fixup) 172 + EX(s32i, a7, a5, 12, fixup) 173 173 addi a5, a5, 16 174 174 #if 
!XCHAL_HAVE_LOOPS 175 175 blt a3, a8, .Loop1 ··· 177 177 .Loop1done: 178 178 bbci.l a4, 3, .L2 179 179 # copy 8 bytes 180 - EX(l32i, a6, a3, 0, l_fixup) 181 - EX(l32i, a7, a3, 4, l_fixup) 180 + EX(l32i, a6, a3, 0, fixup) 181 + EX(l32i, a7, a3, 4, fixup) 182 182 addi a3, a3, 8 183 - EX(s32i, a6, a5, 0, s_fixup) 184 - EX(s32i, a7, a5, 4, s_fixup) 183 + EX(s32i, a6, a5, 0, fixup) 184 + EX(s32i, a7, a5, 4, fixup) 185 185 addi a5, a5, 8 186 186 .L2: 187 187 bbci.l a4, 2, .L3 188 188 # copy 4 bytes 189 - EX(l32i, a6, a3, 0, l_fixup) 189 + EX(l32i, a6, a3, 0, fixup) 190 190 addi a3, a3, 4 191 - EX(s32i, a6, a5, 0, s_fixup) 191 + EX(s32i, a6, a5, 0, fixup) 192 192 addi a5, a5, 4 193 193 .L3: 194 194 bbci.l a4, 1, .L4 195 195 # copy 2 bytes 196 - EX(l16ui, a6, a3, 0, l_fixup) 196 + EX(l16ui, a6, a3, 0, fixup) 197 197 addi a3, a3, 2 198 - EX(s16i, a6, a5, 0, s_fixup) 198 + EX(s16i, a6, a5, 0, fixup) 199 199 addi a5, a5, 2 200 200 .L4: 201 201 bbci.l a4, 0, .L5 202 202 # copy 1 byte 203 - EX(l8ui, a6, a3, 0, l_fixup) 204 - EX(s8i, a6, a5, 0, s_fixup) 203 + EX(l8ui, a6, a3, 0, fixup) 204 + EX(s8i, a6, a5, 0, fixup) 205 205 .L5: 206 206 movi a2, 0 # return success for len bytes copied 207 207 retw ··· 217 217 # copy 16 bytes per iteration for word-aligned dst and unaligned src 218 218 and a10, a3, a8 # save unalignment offset for below 219 219 sub a3, a3, a10 # align a3 (to avoid sim warnings only; not needed for hardware) 220 - EX(l32i, a6, a3, 0, l_fixup) # load first word 220 + EX(l32i, a6, a3, 0, fixup) # load first word 221 221 #if XCHAL_HAVE_LOOPS 222 222 loopnez a7, .Loop2done 223 223 #else /* !XCHAL_HAVE_LOOPS */ ··· 226 226 add a12, a12, a3 # a12 = end of last 16B source chunk 227 227 #endif /* !XCHAL_HAVE_LOOPS */ 228 228 .Loop2: 229 - EX(l32i, a7, a3, 4, l_fixup) 230 - EX(l32i, a8, a3, 8, l_fixup) 229 + EX(l32i, a7, a3, 4, fixup) 230 + EX(l32i, a8, a3, 8, fixup) 231 231 ALIGN( a6, a6, a7) 232 - EX(s32i, a6, a5, 0, s_fixup) 233 - EX(l32i, a9, a3, 12, l_fixup) 232 + 
EX(s32i, a6, a5, 0, fixup) 233 + EX(l32i, a9, a3, 12, fixup) 234 234 ALIGN( a7, a7, a8) 235 - EX(s32i, a7, a5, 4, s_fixup) 236 - EX(l32i, a6, a3, 16, l_fixup) 235 + EX(s32i, a7, a5, 4, fixup) 236 + EX(l32i, a6, a3, 16, fixup) 237 237 ALIGN( a8, a8, a9) 238 - EX(s32i, a8, a5, 8, s_fixup) 238 + EX(s32i, a8, a5, 8, fixup) 239 239 addi a3, a3, 16 240 240 ALIGN( a9, a9, a6) 241 - EX(s32i, a9, a5, 12, s_fixup) 241 + EX(s32i, a9, a5, 12, fixup) 242 242 addi a5, a5, 16 243 243 #if !XCHAL_HAVE_LOOPS 244 244 blt a3, a12, .Loop2 ··· 246 246 .Loop2done: 247 247 bbci.l a4, 3, .L12 248 248 # copy 8 bytes 249 - EX(l32i, a7, a3, 4, l_fixup) 250 - EX(l32i, a8, a3, 8, l_fixup) 249 + EX(l32i, a7, a3, 4, fixup) 250 + EX(l32i, a8, a3, 8, fixup) 251 251 ALIGN( a6, a6, a7) 252 - EX(s32i, a6, a5, 0, s_fixup) 252 + EX(s32i, a6, a5, 0, fixup) 253 253 addi a3, a3, 8 254 254 ALIGN( a7, a7, a8) 255 - EX(s32i, a7, a5, 4, s_fixup) 255 + EX(s32i, a7, a5, 4, fixup) 256 256 addi a5, a5, 8 257 257 mov a6, a8 258 258 .L12: 259 259 bbci.l a4, 2, .L13 260 260 # copy 4 bytes 261 - EX(l32i, a7, a3, 4, l_fixup) 261 + EX(l32i, a7, a3, 4, fixup) 262 262 addi a3, a3, 4 263 263 ALIGN( a6, a6, a7) 264 - EX(s32i, a6, a5, 0, s_fixup) 264 + EX(s32i, a6, a5, 0, fixup) 265 265 addi a5, a5, 4 266 266 mov a6, a7 267 267 .L13: 268 268 add a3, a3, a10 # readjust a3 with correct misalignment 269 269 bbci.l a4, 1, .L14 270 270 # copy 2 bytes 271 - EX(l8ui, a6, a3, 0, l_fixup) 272 - EX(l8ui, a7, a3, 1, l_fixup) 271 + EX(l8ui, a6, a3, 0, fixup) 272 + EX(l8ui, a7, a3, 1, fixup) 273 273 addi a3, a3, 2 274 - EX(s8i, a6, a5, 0, s_fixup) 275 - EX(s8i, a7, a5, 1, s_fixup) 274 + EX(s8i, a6, a5, 0, fixup) 275 + EX(s8i, a7, a5, 1, fixup) 276 276 addi a5, a5, 2 277 277 .L14: 278 278 bbci.l a4, 0, .L15 279 279 # copy 1 byte 280 - EX(l8ui, a6, a3, 0, l_fixup) 281 - EX(s8i, a6, a5, 0, s_fixup) 280 + EX(l8ui, a6, a3, 0, fixup) 281 + EX(s8i, a6, a5, 0, fixup) 282 282 .L15: 283 283 movi a2, 0 # return success for len bytes copied 284 284 
retw ··· 291 291 * bytes_copied = a5 - a2 292 292 * retval = bytes_not_copied = original len - bytes_copied 293 293 * retval = a11 - (a5 - a2) 294 - * 295 - * Clearing the remaining pieces of kernel memory plugs security 296 - * holes. This functionality is the equivalent of the *_zeroing 297 - * functions that some architectures provide. 298 294 */ 299 295 300 - .Lmemset: 301 - .word memset 302 296 303 - s_fixup: 297 + fixup: 304 298 sub a2, a5, a2 /* a2 <-- bytes copied */ 305 299 sub a2, a11, a2 /* a2 <-- bytes not copied */ 306 - retw 307 - 308 - l_fixup: 309 - sub a2, a5, a2 /* a2 <-- bytes copied */ 310 - sub a2, a11, a2 /* a2 <-- bytes not copied == return value */ 311 - 312 - /* void *memset(void *s, int c, size_t n); */ 313 - mov a6, a5 /* s */ 314 - movi a7, 0 /* c */ 315 - mov a8, a2 /* n */ 316 - l32r a4, .Lmemset 317 - callx4 a4 318 - /* Ignore memset return value in a6. */ 319 - /* a2 still contains bytes not copied. */ 320 300 retw
+1 -1
block/bsg.c
··· 650 650 651 651 dprintk("%s: write %zd bytes\n", bd->name, count); 652 652 653 - if (unlikely(segment_eq(get_fs(), KERNEL_DS))) 653 + if (unlikely(uaccess_kernel())) 654 654 return -EINVAL; 655 655 656 656 bsg_set_block(bd, file);
+3 -22
drivers/scsi/esas2r/esas2r_ioctl.c
··· 1289 1289 || (cmd > EXPRESS_IOCTL_MAX)) 1290 1290 return -ENOTSUPP; 1291 1291 1292 - if (!access_ok(VERIFY_WRITE, arg, sizeof(struct atto_express_ioctl))) { 1292 + ioctl = memdup_user(arg, sizeof(struct atto_express_ioctl)); 1293 + if (IS_ERR(ioctl)) { 1293 1294 esas2r_log(ESAS2R_LOG_WARN, 1294 1295 "ioctl_handler access_ok failed for cmd %d, " 1295 1296 "address %p", cmd, 1296 1297 arg); 1297 - return -EFAULT; 1298 - } 1299 - 1300 - /* allocate a kernel memory buffer for the IOCTL data */ 1301 - ioctl = kzalloc(sizeof(struct atto_express_ioctl), GFP_KERNEL); 1302 - if (ioctl == NULL) { 1303 - esas2r_log(ESAS2R_LOG_WARN, 1304 - "ioctl_handler kzalloc failed for %zu bytes", 1305 - sizeof(struct atto_express_ioctl)); 1306 - return -ENOMEM; 1307 - } 1308 - 1309 - err = __copy_from_user(ioctl, arg, sizeof(struct atto_express_ioctl)); 1310 - if (err != 0) { 1311 - esas2r_log(ESAS2R_LOG_WARN, 1312 - "copy_from_user didn't copy everything (err %d, cmd %d)", 1313 - err, 1314 - cmd); 1315 - kfree(ioctl); 1316 - 1317 - return -EFAULT; 1298 + return PTR_ERR(ioctl); 1318 1299 } 1319 1300 1320 1301 /* verify the signature */
+1 -1
drivers/scsi/sg.c
··· 581 581 sg_io_hdr_t *hp; 582 582 unsigned char cmnd[SG_MAX_CDB_SIZE]; 583 583 584 - if (unlikely(segment_eq(get_fs(), KERNEL_DS))) 584 + if (unlikely(uaccess_kernel())) 585 585 return -EINVAL; 586 586 587 587 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
+4 -21
fs/ocfs2/cluster/tcp.c
··· 1460 1460 1461 1461 static int o2net_set_nodelay(struct socket *sock) 1462 1462 { 1463 - int ret, val = 1; 1464 - mm_segment_t oldfs; 1463 + int val = 1; 1465 1464 1466 - oldfs = get_fs(); 1467 - set_fs(KERNEL_DS); 1468 - 1469 - /* 1470 - * Dear unsuspecting programmer, 1471 - * 1472 - * Don't use sock_setsockopt() for SOL_TCP. It doesn't check its level 1473 - * argument and assumes SOL_SOCKET so, say, your TCP_NODELAY will 1474 - * silently turn into SO_DEBUG. 1475 - * 1476 - * Yours, 1477 - * Keeper of hilariously fragile interfaces. 1478 - */ 1479 - ret = sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, 1480 - (char __user *)&val, sizeof(val)); 1481 - 1482 - set_fs(oldfs); 1483 - return ret; 1465 + return kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, 1466 + (void *)&val, sizeof(val)); 1484 1467 } 1485 1468 1486 1469 static int o2net_set_usertimeout(struct socket *sock) ··· 1471 1488 int user_timeout = O2NET_TCP_USER_TIMEOUT; 1472 1489 1473 1490 return kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT, 1474 - (char *)&user_timeout, sizeof(user_timeout)); 1491 + (void *)&user_timeout, sizeof(user_timeout)); 1475 1492 } 1476 1493 1477 1494 static void o2net_initialize_handshake(void)
+26
include/asm-generic/extable.h
··· 1 + #ifndef __ASM_GENERIC_EXTABLE_H 2 + #define __ASM_GENERIC_EXTABLE_H 3 + 4 + /* 5 + * The exception table consists of pairs of addresses: the first is the 6 + * address of an instruction that is allowed to fault, and the second is 7 + * the address at which the program should continue. No registers are 8 + * modified, so it is entirely up to the continuation code to figure out 9 + * what to do. 10 + * 11 + * All the routines below use bits of fixup code that are out of line 12 + * with the main instruction path. This means when everything is well, 13 + * we don't even have to jump over them. Further, they do not intrude 14 + * on our cache or tlb entries. 15 + */ 16 + 17 + struct exception_table_entry 18 + { 19 + unsigned long insn, fixup; 20 + }; 21 + 22 + 23 + struct pt_regs; 24 + extern int fixup_exception(struct pt_regs *regs); 25 + 26 + #endif
+8 -127
include/asm-generic/uaccess.h
··· 6 6 * on any machine that has kernel and user data in the same 7 7 * address space, e.g. all NOMMU machines. 8 8 */ 9 - #include <linux/sched.h> 10 9 #include <linux/string.h> 11 10 12 11 #include <asm/segment.h> ··· 34 35 #define segment_eq(a, b) ((a).seg == (b).seg) 35 36 #endif 36 37 37 - #define VERIFY_READ 0 38 - #define VERIFY_WRITE 1 39 - 40 38 #define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size)) 41 39 42 40 /* ··· 44 48 static inline int __access_ok(unsigned long addr, unsigned long size) 45 49 { 46 50 return 1; 47 - } 48 - #endif 49 - 50 - /* 51 - * The exception table consists of pairs of addresses: the first is the 52 - * address of an instruction that is allowed to fault, and the second is 53 - * the address at which the program should continue. No registers are 54 - * modified, so it is entirely up to the continuation code to figure out 55 - * what to do. 56 - * 57 - * All the routines below use bits of fixup code that are out of line 58 - * with the main instruction path. This means when everything is well, 59 - * we don't even have to jump over them. Further, they do not intrude 60 - * on our cache or tlb entries. 
61 - */ 62 - 63 - struct exception_table_entry 64 - { 65 - unsigned long insn, fixup; 66 - }; 67 - 68 - /* 69 - * architectures with an MMU should override these two 70 - */ 71 - #ifndef __copy_from_user 72 - static inline __must_check long __copy_from_user(void *to, 73 - const void __user * from, unsigned long n) 74 - { 75 - if (__builtin_constant_p(n)) { 76 - switch(n) { 77 - case 1: 78 - *(u8 *)to = *(u8 __force *)from; 79 - return 0; 80 - case 2: 81 - *(u16 *)to = *(u16 __force *)from; 82 - return 0; 83 - case 4: 84 - *(u32 *)to = *(u32 __force *)from; 85 - return 0; 86 - #ifdef CONFIG_64BIT 87 - case 8: 88 - *(u64 *)to = *(u64 __force *)from; 89 - return 0; 90 - #endif 91 - default: 92 - break; 93 - } 94 - } 95 - 96 - memcpy(to, (const void __force *)from, n); 97 - return 0; 98 - } 99 - #endif 100 - 101 - #ifndef __copy_to_user 102 - static inline __must_check long __copy_to_user(void __user *to, 103 - const void *from, unsigned long n) 104 - { 105 - if (__builtin_constant_p(n)) { 106 - switch(n) { 107 - case 1: 108 - *(u8 __force *)to = *(u8 *)from; 109 - return 0; 110 - case 2: 111 - *(u16 __force *)to = *(u16 *)from; 112 - return 0; 113 - case 4: 114 - *(u32 __force *)to = *(u32 *)from; 115 - return 0; 116 - #ifdef CONFIG_64BIT 117 - case 8: 118 - *(u64 __force *)to = *(u64 *)from; 119 - return 0; 120 - #endif 121 - default: 122 - break; 123 - } 124 - } 125 - 126 - memcpy((void __force *)to, from, n); 127 - return 0; 128 51 } 129 52 #endif 130 53 ··· 86 171 87 172 static inline int __put_user_fn(size_t size, void __user *ptr, void *x) 88 173 { 89 - size = __copy_to_user(ptr, x, size); 90 - return size ? -EFAULT : size; 174 + return unlikely(raw_copy_to_user(ptr, x, size)) ? 
-EFAULT : 0; 91 175 } 92 176 93 177 #define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k) ··· 101 187 __chk_user_ptr(ptr); \ 102 188 switch (sizeof(*(ptr))) { \ 103 189 case 1: { \ 104 - unsigned char __x; \ 190 + unsigned char __x = 0; \ 105 191 __gu_err = __get_user_fn(sizeof (*(ptr)), \ 106 192 ptr, &__x); \ 107 193 (x) = *(__force __typeof__(*(ptr)) *) &__x; \ 108 194 break; \ 109 195 }; \ 110 196 case 2: { \ 111 - unsigned short __x; \ 197 + unsigned short __x = 0; \ 112 198 __gu_err = __get_user_fn(sizeof (*(ptr)), \ 113 199 ptr, &__x); \ 114 200 (x) = *(__force __typeof__(*(ptr)) *) &__x; \ 115 201 break; \ 116 202 }; \ 117 203 case 4: { \ 118 - unsigned int __x; \ 204 + unsigned int __x = 0; \ 119 205 __gu_err = __get_user_fn(sizeof (*(ptr)), \ 120 206 ptr, &__x); \ 121 207 (x) = *(__force __typeof__(*(ptr)) *) &__x; \ 122 208 break; \ 123 209 }; \ 124 210 case 8: { \ 125 - unsigned long long __x; \ 211 + unsigned long long __x = 0; \ 126 212 __gu_err = __get_user_fn(sizeof (*(ptr)), \ 127 213 ptr, &__x); \ 128 214 (x) = *(__force __typeof__(*(ptr)) *) &__x; \ ··· 147 233 #ifndef __get_user_fn 148 234 static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) 149 235 { 150 - size_t n = __copy_from_user(x, ptr, size); 151 - if (unlikely(n)) { 152 - memset(x + (size - n), 0, n); 153 - return -EFAULT; 154 - } 155 - return 0; 236 + return unlikely(raw_copy_from_user(x, ptr, size)) ? 
-EFAULT : 0; 156 237 } 157 238 158 239 #define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k) ··· 155 246 #endif 156 247 157 248 extern int __get_user_bad(void) __attribute__((noreturn)); 158 - 159 - #ifndef __copy_from_user_inatomic 160 - #define __copy_from_user_inatomic __copy_from_user 161 - #endif 162 - 163 - #ifndef __copy_to_user_inatomic 164 - #define __copy_to_user_inatomic __copy_to_user 165 - #endif 166 - 167 - static inline long copy_from_user(void *to, 168 - const void __user * from, unsigned long n) 169 - { 170 - unsigned long res = n; 171 - might_fault(); 172 - if (likely(access_ok(VERIFY_READ, from, n))) 173 - res = __copy_from_user(to, from, n); 174 - if (unlikely(res)) 175 - memset(to + (n - res), 0, res); 176 - return res; 177 - } 178 - 179 - static inline long copy_to_user(void __user *to, 180 - const void *from, unsigned long n) 181 - { 182 - might_fault(); 183 - if (access_ok(VERIFY_WRITE, to, n)) 184 - return __copy_to_user(to, from, n); 185 - else 186 - return n; 187 - } 188 249 189 250 /* 190 251 * Copy a null terminated string from userspace. ··· 226 347 227 348 return __clear_user(to, n); 228 349 } 350 + 351 + #include <asm/extable.h> 229 352 230 353 #endif /* __ASM_GENERIC_UACCESS_H */
+191 -6
include/linux/uaccess.h
··· 2 2 #define __LINUX_UACCESS_H__ 3 3 4 4 #include <linux/sched.h> 5 + #include <linux/thread_info.h> 6 + #include <linux/kasan-checks.h> 7 + 8 + #define VERIFY_READ 0 9 + #define VERIFY_WRITE 1 10 + 11 + #define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS) 12 + 5 13 #include <asm/uaccess.h> 14 + 15 + /* 16 + * Architectures should provide two primitives (raw_copy_{to,from}_user()) 17 + * and get rid of their private instances of copy_{to,from}_user() and 18 + * __copy_{to,from}_user{,_inatomic}(). 19 + * 20 + * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and 21 + * return the amount left to copy. They should assume that access_ok() has 22 + * already been checked (and succeeded); they should *not* zero-pad anything. 23 + * No KASAN or object size checks either - those belong here. 24 + * 25 + * Both of these functions should attempt to copy size bytes starting at from 26 + * into the area starting at to. They must not fetch or store anything 27 + * outside of those areas. Return value must be between 0 (everything 28 + * copied successfully) and size (nothing copied). 29 + * 30 + * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting 31 + * at to must become equal to the bytes fetched from the corresponding area 32 + * starting at from. All data past to + size - N must be left unmodified. 33 + * 34 + * If copying succeeds, the return value must be 0. If some data cannot be 35 + * fetched, it is permitted to copy less than had been fetched; the only 36 + * hard requirement is that not storing anything at all (i.e. returning size) 37 + * should happen only when nothing could be copied. In other words, you don't 38 + * have to squeeze as much as possible - it is allowed, but not necessary. 39 + * 40 + * For raw_copy_from_user() to always points to kernel memory and no faults 41 + * on store should happen. Interpretation of from is affected by set_fs(). 42 + * For raw_copy_to_user() it's the other way round. 
43 + * 44 + * Both can be inlined - it's up to architectures whether it wants to bother 45 + * with that. They should not be used directly; they are used to implement 46 + * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user_inatomic()) 47 + * that are used instead. Out of those, __... ones are inlined. Plain 48 + * copy_{to,from}_user() might or might not be inlined. If you want them 49 + * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER. 50 + * 51 + * NOTE: only copy_from_user() zero-pads the destination in case of short copy. 52 + * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything 53 + * at all; their callers absolutely must check the return value. 54 + * 55 + * Biarch ones should also provide raw_copy_in_user() - similar to the above, 56 + * but both source and destination are __user pointers (affected by set_fs() 57 + * as usual) and both source and destination can trigger faults. 58 + */ 59 + 60 + static __always_inline unsigned long 61 + __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) 62 + { 63 + kasan_check_write(to, n); 64 + check_object_size(to, n, false); 65 + return raw_copy_from_user(to, from, n); 66 + } 67 + 68 + static __always_inline unsigned long 69 + __copy_from_user(void *to, const void __user *from, unsigned long n) 70 + { 71 + might_fault(); 72 + kasan_check_write(to, n); 73 + check_object_size(to, n, false); 74 + return raw_copy_from_user(to, from, n); 75 + } 76 + 77 + /** 78 + * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking. 79 + * @to: Destination address, in user space. 80 + * @from: Source address, in kernel space. 81 + * @n: Number of bytes to copy. 82 + * 83 + * Context: User context only. 84 + * 85 + * Copy data from kernel space to user space. Caller must check 86 + * the specified block with access_ok() before calling this function. 
87 + * The caller should also make sure he pins the user space address 88 + * so that we don't result in page fault and sleep. 89 + */ 90 + static __always_inline unsigned long 91 + __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) 92 + { 93 + kasan_check_read(from, n); 94 + check_object_size(from, n, true); 95 + return raw_copy_to_user(to, from, n); 96 + } 97 + 98 + static __always_inline unsigned long 99 + __copy_to_user(void __user *to, const void *from, unsigned long n) 100 + { 101 + might_fault(); 102 + kasan_check_read(from, n); 103 + check_object_size(from, n, true); 104 + return raw_copy_to_user(to, from, n); 105 + } 106 + 107 + #ifdef INLINE_COPY_FROM_USER 108 + static inline unsigned long 109 + _copy_from_user(void *to, const void __user *from, unsigned long n) 110 + { 111 + unsigned long res = n; 112 + if (likely(access_ok(VERIFY_READ, from, n))) 113 + res = raw_copy_from_user(to, from, n); 114 + if (unlikely(res)) 115 + memset(to + (n - res), 0, res); 116 + return res; 117 + } 118 + #else 119 + extern unsigned long 120 + _copy_from_user(void *, const void __user *, unsigned long); 121 + #endif 122 + 123 + #ifdef INLINE_COPY_TO_USER 124 + static inline unsigned long 125 + _copy_to_user(void __user *to, const void *from, unsigned long n) 126 + { 127 + if (access_ok(VERIFY_WRITE, to, n)) 128 + n = raw_copy_to_user(to, from, n); 129 + return n; 130 + } 131 + #else 132 + extern unsigned long 133 + _copy_to_user(void __user *, const void *, unsigned long); 134 + #endif 135 + 136 + extern void __compiletime_error("usercopy buffer size is too small") 137 + __bad_copy_user(void); 138 + 139 + static inline void copy_user_overflow(int size, unsigned long count) 140 + { 141 + WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count); 142 + } 143 + 144 + static __always_inline unsigned long __must_check 145 + copy_from_user(void *to, const void __user *from, unsigned long n) 146 + { 147 + int sz = __compiletime_object_size(to); 148 + 
149 + might_fault(); 150 + kasan_check_write(to, n); 151 + 152 + if (likely(sz < 0 || sz >= n)) { 153 + check_object_size(to, n, false); 154 + n = _copy_from_user(to, from, n); 155 + } else if (!__builtin_constant_p(n)) 156 + copy_user_overflow(sz, n); 157 + else 158 + __bad_copy_user(); 159 + 160 + return n; 161 + } 162 + 163 + static __always_inline unsigned long __must_check 164 + copy_to_user(void __user *to, const void *from, unsigned long n) 165 + { 166 + int sz = __compiletime_object_size(from); 167 + 168 + kasan_check_read(from, n); 169 + might_fault(); 170 + 171 + if (likely(sz < 0 || sz >= n)) { 172 + check_object_size(from, n, true); 173 + n = _copy_to_user(to, from, n); 174 + } else if (!__builtin_constant_p(n)) 175 + copy_user_overflow(sz, n); 176 + else 177 + __bad_copy_user(); 178 + 179 + return n; 180 + } 181 + #ifdef CONFIG_COMPAT 182 + static __always_inline unsigned long __must_check 183 + __copy_in_user(void __user *to, const void *from, unsigned long n) 184 + { 185 + might_fault(); 186 + return raw_copy_in_user(to, from, n); 187 + } 188 + static __always_inline unsigned long __must_check 189 + copy_in_user(void __user *to, const void *from, unsigned long n) 190 + { 191 + might_fault(); 192 + if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n)) 193 + n = raw_copy_in_user(to, from, n); 194 + return n; 195 + } 196 + #endif 6 197 7 198 static __always_inline void pagefault_disabled_inc(void) 8 199 { ··· 256 65 const void __user *from, unsigned long n) 257 66 { 258 67 return __copy_from_user_inatomic(to, from, n); 259 - } 260 - 261 - static inline unsigned long __copy_from_user_nocache(void *to, 262 - const void __user *from, unsigned long n) 263 - { 264 - return __copy_from_user(to, from, n); 265 68 } 266 69 267 70 #endif /* ARCH_HAS_NOCACHE_UACCESS */
+1 -1
include/rdma/ib.h
··· 100 100 */ 101 101 static inline bool ib_safe_file_access(struct file *filp) 102 102 { 103 - return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS); 103 + return filp->f_cred == current_cred() && !uaccess_kernel(); 104 104 } 105 105 106 106 #endif /* _RDMA_IB_H */
+1 -1
kernel/trace/bpf_trace.c
··· 96 96 if (unlikely(in_interrupt() || 97 97 current->flags & (PF_KTHREAD | PF_EXITING))) 98 98 return -EPERM; 99 - if (unlikely(segment_eq(get_fs(), KERNEL_DS))) 99 + if (unlikely(uaccess_kernel())) 100 100 return -EPERM; 101 101 if (!access_ok(VERIFY_WRITE, unsafe_ptr, size)) 102 102 return -EPERM;
+1 -1
lib/Makefile
··· 41 41 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \ 42 42 bsearch.o find_bit.o llist.o memweight.o kfifo.o \ 43 43 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \ 44 - once.o refcount.o 44 + once.o refcount.o usercopy.o 45 45 obj-y += string_helpers.o 46 46 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o 47 47 obj-y += hexdump.o
+3 -3
lib/iov_iter.c
··· 413 413 size_t count) 414 414 { 415 415 /* It will get better. Eventually... */ 416 - if (segment_eq(get_fs(), KERNEL_DS)) { 416 + if (uaccess_kernel()) { 417 417 direction |= ITER_KVEC; 418 418 i->type = direction; 419 419 i->kvec = (struct kvec *)iov; ··· 604 604 return 0; 605 605 } 606 606 iterate_and_advance(i, bytes, v, 607 - __copy_from_user_nocache((to += v.iov_len) - v.iov_len, 607 + __copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len, 608 608 v.iov_base, v.iov_len), 609 609 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page, 610 610 v.bv_offset, v.bv_len), ··· 625 625 if (unlikely(i->count < bytes)) 626 626 return false; 627 627 iterate_all_kinds(i, bytes, v, ({ 628 - if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len, 628 + if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len, 629 629 v.iov_base, v.iov_len)) 630 630 return false; 631 631 0;}),
+26
lib/usercopy.c
··· 1 + #include <linux/uaccess.h> 2 + 3 + /* out-of-line parts */ 4 + 5 + #ifndef INLINE_COPY_FROM_USER 6 + unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n) 7 + { 8 + unsigned long res = n; 9 + if (likely(access_ok(VERIFY_READ, from, n))) 10 + res = raw_copy_from_user(to, from, n); 11 + if (unlikely(res)) 12 + memset(to + (n - res), 0, res); 13 + return res; 14 + } 15 + EXPORT_SYMBOL(_copy_from_user); 16 + #endif 17 + 18 + #ifndef INLINE_COPY_TO_USER 19 + unsigned long _copy_to_user(void *to, const void __user *from, unsigned long n) 20 + { 21 + if (likely(access_ok(VERIFY_WRITE, to, n))) 22 + n = raw_copy_to_user(to, from, n); 23 + return n; 24 + } 25 + EXPORT_SYMBOL(_copy_to_user); 26 + #endif
+1 -1
mm/memory.c
··· 4298 4298 * get paged out, therefore we'll never actually fault, and the 4299 4299 * below annotations will generate false positives. 4300 4300 */ 4301 - if (segment_eq(get_fs(), KERNEL_DS)) 4301 + if (uaccess_kernel()) 4302 4302 return; 4303 4303 if (pagefault_disabled()) 4304 4304 return;
+1 -4
net/rds/tcp.c
··· 84 84 /* doing it this way avoids calling tcp_sk() */ 85 85 void rds_tcp_nonagle(struct socket *sock) 86 86 { 87 - mm_segment_t oldfs = get_fs(); 88 87 int val = 1; 89 88 90 - set_fs(KERNEL_DS); 91 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val, 89 + kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (void *)&val, 92 90 sizeof(val)); 93 - set_fs(oldfs); 94 91 } 95 92 96 93 u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc)
+1 -7
net/rds/tcp_send.c
··· 40 40 41 41 static void rds_tcp_cork(struct socket *sock, int val) 42 42 { 43 - mm_segment_t oldfs; 44 - 45 - oldfs = get_fs(); 46 - set_fs(KERNEL_DS); 47 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val, 48 - sizeof(val)); 49 - set_fs(oldfs); 43 + kernel_setsockopt(sock, SOL_TCP, TCP_CORK, (void *)&val, sizeof(val)); 50 44 } 51 45 52 46 void rds_tcp_xmit_path_prepare(struct rds_conn_path *cp)
-9
security/Kconfig
··· 125 125 validating memory ranges against heap object sizes in 126 126 support of CONFIG_HARDENED_USERCOPY. 127 127 128 - config HAVE_ARCH_HARDENED_USERCOPY 129 - bool 130 - help 131 - The architecture supports CONFIG_HARDENED_USERCOPY by 132 - calling check_object_size() just before performing the 133 - userspace copies in the low level implementation of 134 - copy_to_user() and copy_from_user(). 135 - 136 128 config HARDENED_USERCOPY 137 129 bool "Harden memory copies between kernel and userspace" 138 - depends on HAVE_ARCH_HARDENED_USERCOPY 139 130 depends on HAVE_HARDENED_USERCOPY_ALLOCATOR 140 131 select BUG 141 132 help
+1 -1
security/tomoyo/network.c
··· 608 608 static bool tomoyo_kernel_service(void) 609 609 { 610 610 /* Nothing to do if I am a kernel service. */ 611 - return segment_eq(get_fs(), KERNEL_DS); 611 + return uaccess_kernel(); 612 612 } 613 613 614 614 /**