Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
At tag v4.14-rc2 — 722 lines, 22 kB (view raw)
1#ifndef _ASM_X86_UACCESS_H 2#define _ASM_X86_UACCESS_H 3/* 4 * User space memory access functions 5 */ 6#include <linux/compiler.h> 7#include <linux/kasan-checks.h> 8#include <linux/string.h> 9#include <asm/asm.h> 10#include <asm/page.h> 11#include <asm/smap.h> 12#include <asm/extable.h> 13 14/* 15 * The fs value determines whether argument validity checking should be 16 * performed or not. If get_fs() == USER_DS, checking is performed, with 17 * get_fs() == KERNEL_DS, checking is bypassed. 18 * 19 * For historical reasons, these macros are grossly misnamed. 20 */ 21 22#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) 23 24#define KERNEL_DS MAKE_MM_SEG(-1UL) 25#define USER_DS MAKE_MM_SEG(TASK_SIZE_MAX) 26 27#define get_ds() (KERNEL_DS) 28#define get_fs() (current->thread.addr_limit) 29static inline void set_fs(mm_segment_t fs) 30{ 31 current->thread.addr_limit = fs; 32 /* On user-mode return, check fs is correct */ 33 set_thread_flag(TIF_FSCHECK); 34} 35 36#define segment_eq(a, b) ((a).seg == (b).seg) 37 38#define user_addr_max() (current->thread.addr_limit.seg) 39#define __addr_ok(addr) \ 40 ((unsigned long __force)(addr) < user_addr_max()) 41 42/* 43 * Test whether a block of memory is a valid user space address. 44 * Returns 0 if the range is valid, nonzero otherwise. 45 */ 46static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit) 47{ 48 /* 49 * If we have used "sizeof()" for the size, 50 * we know it won't overflow the limit (but 51 * it might overflow the 'addr', so it's 52 * important to subtract the size from the 53 * limit, not add it to the address). 54 */ 55 if (__builtin_constant_p(size)) 56 return unlikely(addr > limit - size); 57 58 /* Arbitrary sizes? 
Be careful about overflow */ 59 addr += size; 60 if (unlikely(addr < size)) 61 return true; 62 return unlikely(addr > limit); 63} 64 65#define __range_not_ok(addr, size, limit) \ 66({ \ 67 __chk_user_ptr(addr); \ 68 __chk_range_not_ok((unsigned long __force)(addr), size, limit); \ 69}) 70 71#ifdef CONFIG_DEBUG_ATOMIC_SLEEP 72# define WARN_ON_IN_IRQ() WARN_ON_ONCE(!in_task()) 73#else 74# define WARN_ON_IN_IRQ() 75#endif 76 77/** 78 * access_ok: - Checks if a user space pointer is valid 79 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that 80 * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe 81 * to write to a block, it is always safe to read from it. 82 * @addr: User space pointer to start of block to check 83 * @size: Size of block to check 84 * 85 * Context: User context only. This function may sleep if pagefaults are 86 * enabled. 87 * 88 * Checks if a pointer to a block of memory in user space is valid. 89 * 90 * Returns true (nonzero) if the memory block may be valid, false (zero) 91 * if it is definitely invalid. 92 * 93 * Note that, depending on architecture, this function probably just 94 * checks that the pointer is in the user space range - after calling 95 * this function, memory access functions may still return -EFAULT. 96 */ 97#define access_ok(type, addr, size) \ 98({ \ 99 WARN_ON_IN_IRQ(); \ 100 likely(!__range_not_ok(addr, size, user_addr_max())); \ 101}) 102 103/* 104 * These are the main single-value transfer routines. They automatically 105 * use the right size if we just have the right pointer type. 106 * 107 * This gets kind of ugly. We want to return _two_ values in "get_user()" 108 * and yet we don't want to do any pointers, because that is too much 109 * of a performance impact. Thus we have a few rather ugly macros here, 110 * and hide all the ugliness from the user. 
extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
			ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})

#define __put_user_x(size, x, ptr, __ret_pu)			\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")



#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret)			\
	asm volatile("\n"						\
		     "1:	movl %%eax,0(%2)\n"			\
		     "2:	movl %%edx,4(%2)\n"			\
		     "3:"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	movl %3,%0\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (err)					\
		     : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr)					\
	asm volatile("\n"						\
		     "1:	movl %%eax,0(%1)\n"			\
		     "2:	movl %%edx,4(%1)\n"			\
		     "3:"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     _ASM_EXTABLE_EX(2b, 3b)				\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
	__put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax.  clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);		\
		break;						\
	default:						\
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__builtin_expect(__ret_pu, 0);				\
})

#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
		break;							\
	case 8:								\
		__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,	\
				   errret);				\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %2,%%eax\n"			\
		     "2:	movl %3,%%edx\n"			\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	mov %4,%0\n"				\
		     "	xorl %%eax,%%eax\n"				\
		     "	xorl %%edx,%%edx\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (retval), "=&A"(x)				\
		     : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1),	\
		       "i" (errret), "0" (retval));			\
})

#define __get_user_asm_ex_u64(x, ptr)			(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	 __get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

#define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:xor"itype" %"rtype"0,%"rtype"0\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_EX(1b, 3b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)			\
({								\
	int __pu_err;						\
	__uaccess_begin();					\
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
	__uaccess_end();					\
	__builtin_expect(__pu_err, 0);				\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__uaccess_begin();						\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %"rtype"1,%2\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r"(err)					\
		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try	do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin();						\
	barrier();

#define uaccess_catch(err)						\
	__uaccess_end();						\
	(err) |= (current->thread.uaccess_err ? -EFAULT : 0);		\
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
538 */ 539 540#define __put_user(x, ptr) \ 541 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) 542 543/* 544 * {get|put}_user_try and catch 545 * 546 * get_user_try { 547 * get_user_ex(...); 548 * } get_user_catch(err) 549 */ 550#define get_user_try uaccess_try 551#define get_user_catch(err) uaccess_catch(err) 552 553#define get_user_ex(x, ptr) do { \ 554 unsigned long __gue_val; \ 555 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \ 556 (x) = (__force __typeof__(*(ptr)))__gue_val; \ 557} while (0) 558 559#define put_user_try uaccess_try 560#define put_user_catch(err) uaccess_catch(err) 561 562#define put_user_ex(x, ptr) \ 563 __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) 564 565extern unsigned long 566copy_from_user_nmi(void *to, const void __user *from, unsigned long n); 567extern __must_check long 568strncpy_from_user(char *dst, const char __user *src, long count); 569 570extern __must_check long strnlen_user(const char __user *str, long n); 571 572unsigned long __must_check clear_user(void __user *mem, unsigned long len); 573unsigned long __must_check __clear_user(void __user *mem, unsigned long len); 574 575extern void __cmpxchg_wrong_size(void) 576 __compiletime_error("Bad argument size for cmpxchg"); 577 578#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size) \ 579({ \ 580 int __ret = 0; \ 581 __typeof__(ptr) __uval = (uval); \ 582 __typeof__(*(ptr)) __old = (old); \ 583 __typeof__(*(ptr)) __new = (new); \ 584 __uaccess_begin(); \ 585 switch (size) { \ 586 case 1: \ 587 { \ 588 asm volatile("\n" \ 589 "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \ 590 "2:\n" \ 591 "\t.section .fixup, \"ax\"\n" \ 592 "3:\tmov %3, %0\n" \ 593 "\tjmp 2b\n" \ 594 "\t.previous\n" \ 595 _ASM_EXTABLE(1b, 3b) \ 596 : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ 597 : "i" (-EFAULT), "q" (__new), "1" (__old) \ 598 : "memory" \ 599 ); \ 600 break; \ 601 } \ 602 case 2: \ 603 { \ 604 asm volatile("\n" \ 605 "1:\t" LOCK_PREFIX 
"cmpxchgw %4, %2\n" \ 606 "2:\n" \ 607 "\t.section .fixup, \"ax\"\n" \ 608 "3:\tmov %3, %0\n" \ 609 "\tjmp 2b\n" \ 610 "\t.previous\n" \ 611 _ASM_EXTABLE(1b, 3b) \ 612 : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ 613 : "i" (-EFAULT), "r" (__new), "1" (__old) \ 614 : "memory" \ 615 ); \ 616 break; \ 617 } \ 618 case 4: \ 619 { \ 620 asm volatile("\n" \ 621 "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \ 622 "2:\n" \ 623 "\t.section .fixup, \"ax\"\n" \ 624 "3:\tmov %3, %0\n" \ 625 "\tjmp 2b\n" \ 626 "\t.previous\n" \ 627 _ASM_EXTABLE(1b, 3b) \ 628 : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ 629 : "i" (-EFAULT), "r" (__new), "1" (__old) \ 630 : "memory" \ 631 ); \ 632 break; \ 633 } \ 634 case 8: \ 635 { \ 636 if (!IS_ENABLED(CONFIG_X86_64)) \ 637 __cmpxchg_wrong_size(); \ 638 \ 639 asm volatile("\n" \ 640 "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \ 641 "2:\n" \ 642 "\t.section .fixup, \"ax\"\n" \ 643 "3:\tmov %3, %0\n" \ 644 "\tjmp 2b\n" \ 645 "\t.previous\n" \ 646 _ASM_EXTABLE(1b, 3b) \ 647 : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ 648 : "i" (-EFAULT), "r" (__new), "1" (__old) \ 649 : "memory" \ 650 ); \ 651 break; \ 652 } \ 653 default: \ 654 __cmpxchg_wrong_size(); \ 655 } \ 656 __uaccess_end(); \ 657 *__uval = __old; \ 658 __ret; \ 659}) 660 661#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new) \ 662({ \ 663 access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? 
\ 664 __user_atomic_cmpxchg_inatomic((uval), (ptr), \ 665 (old), (new), sizeof(*(ptr))) : \ 666 -EFAULT; \ 667}) 668 669/* 670 * movsl can be slow when source and dest are not both 8-byte aligned 671 */ 672#ifdef CONFIG_X86_INTEL_USERCOPY 673extern struct movsl_mask { 674 int mask; 675} ____cacheline_aligned_in_smp movsl_mask; 676#endif 677 678#define ARCH_HAS_NOCACHE_UACCESS 1 679 680#ifdef CONFIG_X86_32 681# include <asm/uaccess_32.h> 682#else 683# include <asm/uaccess_64.h> 684#endif 685 686/* 687 * We rely on the nested NMI work to allow atomic faults from the NMI path; the 688 * nested NMI paths are careful to preserve CR2. 689 * 690 * Caller must use pagefault_enable/disable, or run in interrupt context, 691 * and also do a uaccess_ok() check 692 */ 693#define __copy_from_user_nmi __copy_from_user_inatomic 694 695/* 696 * The "unsafe" user accesses aren't really "unsafe", but the naming 697 * is a big fat warning: you have to not only do the access_ok() 698 * checking before using them, but you have to surround them with the 699 * user_access_begin/end() pair. 700 */ 701#define user_access_begin() __uaccess_begin() 702#define user_access_end() __uaccess_end() 703 704#define unsafe_put_user(x, ptr, err_label) \ 705do { \ 706 int __pu_err; \ 707 __typeof__(*(ptr)) __pu_val = (x); \ 708 __put_user_size(__pu_val, (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \ 709 if (unlikely(__pu_err)) goto err_label; \ 710} while (0) 711 712#define unsafe_get_user(x, ptr, err_label) \ 713do { \ 714 int __gu_err; \ 715 __inttype(*(ptr)) __gu_val; \ 716 __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \ 717 (x) = (__force __typeof__(*(ptr)))__gu_val; \ 718 if (unlikely(__gu_err)) goto err_label; \ 719} while (0) 720 721#endif /* _ASM_X86_UACCESS_H */ 722