Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'uaccess.futex' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs into locking/core

Pull uaccess futex cleanups from Al Viro:

Consolidate access_ok() usage and the futex uaccess function zoo.
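
To make the shape of the change easier to follow: the series moves the user-pointer validation into each architecture's arch_futex_atomic_op_inuser() helper and lifts the pagefault disabling out into the generic futex code. The sketch below is a condensed, illustrative composite of the per-arch hunks that follow (not any single file verbatim); the elided read-modify-write body differs per architecture.

/* per-arch helper, e.g. arch/<arch>/include/asm/futex.h */
static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
					      u32 __user *uaddr)
{
	int oldval = 0, ret;

	/* validation now done here instead of by the caller */
	if (!access_ok(uaddr, sizeof(u32)))
		return -EFAULT;

	/* ... per-arch atomic op on *uaddr, setting ret and oldval ... */

	if (!ret)
		*oval = oldval;
	return ret;
}

/* generic caller in kernel/futex.c now brackets the call itself */
	pagefault_disable();
	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
	pagefault_enable();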

+94 -184
+2 -3
arch/alpha/include/asm/futex.h
···
 {
 	int oldval = 0, ret;

-	pagefault_disable();
+	if (!access_ok(uaddr, sizeof(u32)))
+		return -EFAULT;

 	switch (op) {
 	case FUTEX_OP_SET:
···
 	default:
 		ret = -ENOSYS;
 	}
-
-	pagefault_enable();

 	if (!ret)
 		*oval = oldval;
+3 -2
arch/arc/include/asm/futex.h
···
 {
 	int oldval = 0, ret;

+	if (!access_ok(uaddr, sizeof(u32)))
+		return -EFAULT;
+
 #ifndef CONFIG_ARC_HAS_LLSC
 	preempt_disable();	/* to guarantee atomic r-m-w of futex op */
 #endif
-	pagefault_disable();

 	switch (op) {
 	case FUTEX_OP_SET:
···
 		ret = -ENOSYS;
 	}

-	pagefault_enable();
 #ifndef CONFIG_ARC_HAS_LLSC
 	preempt_enable();
 #endif
+3 -2
arch/arm/include/asm/futex.h
···
 {
 	int oldval = 0, ret, tmp;

+	if (!access_ok(uaddr, sizeof(u32)))
+		return -EFAULT;
+
 #ifndef CONFIG_SMP
 	preempt_disable();
 #endif
-	pagefault_disable();

 	switch (op) {
 	case FUTEX_OP_SET:
···
 		ret = -ENOSYS;
 	}

-	pagefault_enable();
 #ifndef CONFIG_SMP
 	preempt_enable();
 #endif
+2 -3
arch/arm64/include/asm/futex.h
···
 	int oldval = 0, ret, tmp;
 	u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);

-	pagefault_disable();
+	if (!access_ok(_uaddr, sizeof(u32)))
+		return -EFAULT;

 	switch (op) {
 	case FUTEX_OP_SET:
···
 	default:
 		ret = -ENOSYS;
 	}
-
-	pagefault_enable();

 	if (!ret)
 		*oval = oldval;
+2 -3
arch/hexagon/include/asm/futex.h
···
 {
 	int oldval = 0, ret;

-	pagefault_disable();
+	if (!access_ok(uaddr, sizeof(u32)))
+		return -EFAULT;

 	switch (op) {
 	case FUTEX_OP_SET:
···
 	default:
 		ret = -ENOSYS;
 	}
-
-	pagefault_enable();

 	if (!ret)
 		*oval = oldval;
+2 -3
arch/ia64/include/asm/futex.h
···
 {
 	int oldval = 0, ret;

-	pagefault_disable();
+	if (!access_ok(uaddr, sizeof(u32)))
+		return -EFAULT;

 	switch (op) {
 	case FUTEX_OP_SET:
···
 	default:
 		ret = -ENOSYS;
 	}
-
-	pagefault_enable();

 	if (!ret)
 		*oval = oldval;
+2 -3
arch/microblaze/include/asm/futex.h
···
 {
 	int oldval = 0, ret;

-	pagefault_disable();
+	if (!access_ok(uaddr, sizeof(u32)))
+		return -EFAULT;

 	switch (op) {
 	case FUTEX_OP_SET:
···
 	default:
 		ret = -ENOSYS;
 	}
-
-	pagefault_enable();

 	if (!ret)
 		*oval = oldval;
+2 -3
arch/mips/include/asm/futex.h
···
 {
 	int oldval = 0, ret;

-	pagefault_disable();
+	if (!access_ok(uaddr, sizeof(u32)))
+		return -EFAULT;

 	switch (op) {
 	case FUTEX_OP_SET:
···
 	default:
 		ret = -ENOSYS;
 	}
-
-	pagefault_enable();

 	if (!ret)
 		*oval = oldval;
+2 -4
arch/nds32/include/asm/futex.h
···
 {
 	int oldval = 0, ret;

-
-	pagefault_disable();
+	if (!access_ok(uaddr, sizeof(u32)))
+		return -EFAULT;
 	switch (op) {
 	case FUTEX_OP_SET:
 		__futex_atomic_op("move %0, %3", ret, oldval, tmp, uaddr,
···
 	default:
 		ret = -ENOSYS;
 	}
-
-	pagefault_enable();

 	if (!ret)
 		*oval = oldval;
+2 -3
arch/openrisc/include/asm/futex.h
···
 {
 	int oldval = 0, ret;

-	pagefault_disable();
+	if (!access_ok(uaddr, sizeof(u32)))
+		return -EFAULT;

 	switch (op) {
 	case FUTEX_OP_SET:
···
 	default:
 		ret = -ENOSYS;
 	}
-
-	pagefault_enable();

 	if (!ret)
 		*oval = oldval;
-2
arch/parisc/include/asm/futex.h
···
 	u32 tmp;

 	_futex_spin_lock_irqsave(uaddr, &flags);
-	pagefault_disable();

 	ret = -EFAULT;
 	if (unlikely(get_user(oldval, uaddr) != 0))
···
 		ret = -EFAULT;

 out_pagefault_enable:
-	pagefault_enable();
 	_futex_spin_unlock_irqrestore(uaddr, &flags);

 	if (!ret)
+2 -3
arch/powerpc/include/asm/futex.h
···
 {
 	int oldval = 0, ret;

+	if (!access_ok(uaddr, sizeof(u32)))
+		return -EFAULT;
 	allow_read_write_user(uaddr, uaddr, sizeof(*uaddr));
-	pagefault_disable();

 	switch (op) {
 	case FUTEX_OP_SET:
···
 	default:
 		ret = -ENOSYS;
 	}
-
-	pagefault_enable();

 	*oval = oldval;

+2 -3
arch/riscv/include/asm/futex.h
···
 {
 	int oldval = 0, ret = 0;

-	pagefault_disable();
+	if (!access_ok(uaddr, sizeof(u32)))
+		return -EFAULT;

 	switch (op) {
 	case FUTEX_OP_SET:
···
 	default:
 		ret = -ENOSYS;
 	}
-
-	pagefault_enable();

 	if (!ret)
 		*oval = oldval;
-2
arch/s390/include/asm/futex.h
···
 	mm_segment_t old_fs;

 	old_fs = enable_sacf_uaccess();
-	pagefault_disable();
 	switch (op) {
 	case FUTEX_OP_SET:
 		__futex_atomic_op("lr %2,%5\n",
···
 	default:
 		ret = -ENOSYS;
 	}
-	pagefault_enable();
 	disable_sacf_uaccess(old_fs);

 	if (!ret)
-4
arch/sh/include/asm/futex.h
···
 	u32 oldval, newval, prev;
 	int ret;

-	pagefault_disable();
-
 	do {
 		ret = get_user(oldval, uaddr);

···

 		ret = futex_atomic_cmpxchg_inatomic(&prev, uaddr, oldval, newval);
 	} while (!ret && prev != oldval);
-
-	pagefault_enable();

 	if (!ret)
 		*oval = oldval;
-4
arch/sparc/include/asm/futex_64.h
···
 	if (unlikely((((unsigned long) uaddr) & 0x3UL)))
 		return -EINVAL;

-	pagefault_disable();
-
 	switch (op) {
 	case FUTEX_OP_SET:
 		__futex_cas_op("mov\t%4, %1", ret, oldval, uaddr, oparg);
···
 	default:
 		ret = -ENOSYS;
 	}
-
-	pagefault_enable();

 	if (!ret)
 		*oval = oldval;
+63 -36
arch/x86/include/asm/futex.h
···
 #include <asm/processor.h>
 #include <asm/smap.h>

-#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
-	asm volatile("\t" ASM_STAC "\n" \
-		     "1:\t" insn "\n" \
-		     "2:\t" ASM_CLAC "\n" \
+#define unsafe_atomic_op1(insn, oval, uaddr, oparg, label) \
+do { \
+	int oldval = 0, ret; \
+	asm volatile("1:\t" insn "\n" \
+		     "2:\n" \
 		     "\t.section .fixup,\"ax\"\n" \
 		     "3:\tmov\t%3, %1\n" \
 		     "\tjmp\t2b\n" \
 		     "\t.previous\n" \
 		     _ASM_EXTABLE_UA(1b, 3b) \
 		     : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
-		     : "i" (-EFAULT), "0" (oparg), "1" (0))
+		     : "i" (-EFAULT), "0" (oparg), "1" (0)); \
+	if (ret) \
+		goto label; \
+	*oval = oldval; \
+} while(0)

-#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
-	asm volatile("\t" ASM_STAC "\n" \
-		     "1:\tmovl %2, %0\n" \
-		     "\tmovl\t%0, %3\n" \
+
+#define unsafe_atomic_op2(insn, oval, uaddr, oparg, label) \
+do { \
+	int oldval = 0, ret, tem; \
+	asm volatile("1:\tmovl %2, %0\n" \
+		     "2:\tmovl\t%0, %3\n" \
 		     "\t" insn "\n" \
-		     "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
-		     "\tjnz\t1b\n" \
-		     "3:\t" ASM_CLAC "\n" \
+		     "3:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
+		     "\tjnz\t2b\n" \
+		     "4:\n" \
 		     "\t.section .fixup,\"ax\"\n" \
-		     "4:\tmov\t%5, %1\n" \
-		     "\tjmp\t3b\n" \
+		     "5:\tmov\t%5, %1\n" \
+		     "\tjmp\t4b\n" \
 		     "\t.previous\n" \
-		     _ASM_EXTABLE_UA(1b, 4b) \
-		     _ASM_EXTABLE_UA(2b, 4b) \
+		     _ASM_EXTABLE_UA(1b, 5b) \
+		     _ASM_EXTABLE_UA(3b, 5b) \
 		     : "=&a" (oldval), "=&r" (ret), \
 		       "+m" (*uaddr), "=&r" (tem) \
-		     : "r" (oparg), "i" (-EFAULT), "1" (0))
+		     : "r" (oparg), "i" (-EFAULT), "1" (0)); \
+	if (ret) \
+		goto label; \
+	*oval = oldval; \
+} while(0)

-static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+static __always_inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
 		u32 __user *uaddr)
 {
-	int oldval = 0, ret, tem;
-
-	pagefault_disable();
+	if (!user_access_begin(uaddr, sizeof(u32)))
+		return -EFAULT;

 	switch (op) {
 	case FUTEX_OP_SET:
-		__futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
+		unsafe_atomic_op1("xchgl %0, %2", oval, uaddr, oparg, Efault);
 		break;
 	case FUTEX_OP_ADD:
-		__futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
-				   uaddr, oparg);
+		unsafe_atomic_op1(LOCK_PREFIX "xaddl %0, %2", oval,
+				  uaddr, oparg, Efault);
 		break;
 	case FUTEX_OP_OR:
-		__futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, oparg);
+		unsafe_atomic_op2("orl %4, %3", oval, uaddr, oparg, Efault);
 		break;
 	case FUTEX_OP_ANDN:
-		__futex_atomic_op2("andl %4, %3", ret, oldval, uaddr, ~oparg);
+		unsafe_atomic_op2("andl %4, %3", oval, uaddr, ~oparg, Efault);
 		break;
 	case FUTEX_OP_XOR:
-		__futex_atomic_op2("xorl %4, %3", ret, oldval, uaddr, oparg);
+		unsafe_atomic_op2("xorl %4, %3", oval, uaddr, oparg, Efault);
 		break;
 	default:
-		ret = -ENOSYS;
+		user_access_end();
+		return -ENOSYS;
 	}
-
-	pagefault_enable();
-
-	if (!ret)
-		*oval = oldval;
-
-	return ret;
+	user_access_end();
+	return 0;
+Efault:
+	user_access_end();
+	return -EFAULT;
 }

 static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 						u32 oldval, u32 newval)
 {
-	return user_atomic_cmpxchg_inatomic(uval, uaddr, oldval, newval);
+	int ret = 0;
+
+	if (!user_access_begin(uaddr, sizeof(u32)))
+		return -EFAULT;
+	asm volatile("\n"
+		     "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
+		     "2:\n"
+		     "\t.section .fixup, \"ax\"\n"
+		     "3:\tmov %3, %0\n"
+		     "\tjmp 2b\n"
+		     "\t.previous\n"
+		     _ASM_EXTABLE_UA(1b, 3b)
+		     : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
+		     : "i" (-EFAULT), "r" (newval), "1" (oldval)
+		     : "memory"
+	);
+	user_access_end();
+	*uval = oldval;
+	return ret;
 }

 #endif
-93
arch/x86/include/asm/uaccess.h
···
 unsigned long __must_check clear_user(void __user *mem, unsigned long len);
 unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

-extern void __cmpxchg_wrong_size(void)
-	__compiletime_error("Bad argument size for cmpxchg");
-
-#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size) \
-({ \
-	int __ret = 0; \
-	__typeof__(*(ptr)) __old = (old); \
-	__typeof__(*(ptr)) __new = (new); \
-	__uaccess_begin_nospec(); \
-	switch (size) { \
-	case 1: \
-	{ \
-		asm volatile("\n" \
-			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
-			"2:\n" \
-			"\t.section .fixup, \"ax\"\n" \
-			"3:\tmov %3, %0\n" \
-			"\tjmp 2b\n" \
-			"\t.previous\n" \
-			_ASM_EXTABLE_UA(1b, 3b) \
-			: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
-			: "i" (-EFAULT), "q" (__new), "1" (__old) \
-			: "memory" \
-		); \
-		break; \
-	} \
-	case 2: \
-	{ \
-		asm volatile("\n" \
-			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
-			"2:\n" \
-			"\t.section .fixup, \"ax\"\n" \
-			"3:\tmov %3, %0\n" \
-			"\tjmp 2b\n" \
-			"\t.previous\n" \
-			_ASM_EXTABLE_UA(1b, 3b) \
-			: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
-			: "i" (-EFAULT), "r" (__new), "1" (__old) \
-			: "memory" \
-		); \
-		break; \
-	} \
-	case 4: \
-	{ \
-		asm volatile("\n" \
-			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
-			"2:\n" \
-			"\t.section .fixup, \"ax\"\n" \
-			"3:\tmov %3, %0\n" \
-			"\tjmp 2b\n" \
-			"\t.previous\n" \
-			_ASM_EXTABLE_UA(1b, 3b) \
-			: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
-			: "i" (-EFAULT), "r" (__new), "1" (__old) \
-			: "memory" \
-		); \
-		break; \
-	} \
-	case 8: \
-	{ \
-		if (!IS_ENABLED(CONFIG_X86_64)) \
-			__cmpxchg_wrong_size(); \
- \
-		asm volatile("\n" \
-			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
-			"2:\n" \
-			"\t.section .fixup, \"ax\"\n" \
-			"3:\tmov %3, %0\n" \
-			"\tjmp 2b\n" \
-			"\t.previous\n" \
-			_ASM_EXTABLE_UA(1b, 3b) \
-			: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
-			: "i" (-EFAULT), "r" (__new), "1" (__old) \
-			: "memory" \
-		); \
-		break; \
-	} \
-	default: \
-		__cmpxchg_wrong_size(); \
-	} \
-	__uaccess_end(); \
-	*(uval) = __old; \
-	__ret; \
-})
-
-#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new) \
-({ \
-	access_ok((ptr), sizeof(*(ptr))) ? \
-		__user_atomic_cmpxchg_inatomic((uval), (ptr), \
-				(old), (new), sizeof(*(ptr))) : \
-		-EFAULT; \
-})
-
 /*
  * movsl can be slow when source and dest are not both 8-byte aligned
  */
+2 -3
arch/xtensa/include/asm/futex.h
···
 #if XCHAL_HAVE_S32C1I || XCHAL_HAVE_EXCLUSIVE
 	int oldval = 0, ret;

-	pagefault_disable();
+	if (!access_ok(uaddr, sizeof(u32)))
+		return -EFAULT;

 	switch (op) {
 	case FUTEX_OP_SET:
···
 	default:
 		ret = -ENOSYS;
 	}
-
-	pagefault_enable();

 	if (!ret)
 		*oval = oldval;
-2
include/asm-generic/futex.h
···
 	u32 tmp;

 	preempt_disable();
-	pagefault_disable();

 	ret = -EFAULT;
 	if (unlikely(get_user(oldval, uaddr) != 0))
···
 		ret = -EFAULT;

 out_pagefault_enable:
-	pagefault_enable();
 	preempt_enable();

 	if (ret == 0)
+2 -3
kernel/futex.c
···
 		oparg = 1 << oparg;
 	}

-	if (!access_ok(uaddr, sizeof(u32)))
-		return -EFAULT;
-
+	pagefault_disable();
 	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
+	pagefault_enable();
 	if (ret)
 		return ret;
+1
tools/objtool/check.c
··· 478 478 "__sanitizer_cov_trace_cmp2", 479 479 "__sanitizer_cov_trace_cmp4", 480 480 "__sanitizer_cov_trace_cmp8", 481 + "__sanitizer_cov_trace_switch", 481 482 /* UBSAN */ 482 483 "ubsan_type_mismatch_common", 483 484 "__ubsan_handle_type_mismatch",