Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

x86/futex: Convert to scoped user access

Replace the open-coded implementation with the scoped user access
guards.

No functional change intended.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://patch.msgid.link/20251027083745.799714344@linutronix.de
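[Editor's note] For readers unfamiliar with the guard pattern the patch adopts, here is a minimal userspace sketch of the underlying idea, assuming GCC/Clang's __attribute__((cleanup)); the demo_* names are hypothetical and this is not the kernel's scoped_user_rw_access() machinery. The property the conversion buys is that once the guard is armed, every exit path releases the access region automatically, so no early return can leak an open user-access window the way a forgotten user_access_end() call can:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for user_access_begin()/user_access_end(); hypothetical. */
    static bool demo_access_begin(void *addr)
    {
            printf("begin %p\n", addr);
            return addr != NULL;            /* stand-in for the fault check */
    }

    static void demo_access_end(bool *armed)
    {
            if (*armed)
                    puts("end");            /* runs on every scope exit */
    }

    static int demo_op(int *uaddr, int op, int val)
    {
            __attribute__((cleanup(demo_access_end))) bool armed = false;

            if (!demo_access_begin(uaddr))
                    return -14;             /* -EFAULT */
            armed = true;

            switch (op) {
            case 0:
                    *uaddr = val;           /* "set" */
                    return 0;
            case 1:
                    *uaddr += val;          /* "add" */
                    return 0;
            default:
                    return -38;             /* -ENOSYS: still releases */
            }
    }

    int main(void)
    {
            int word = 1;

            printf("rc=%d word=%d\n", demo_op(&word, 1, 41), word);
            printf("rc=%d\n", demo_op(&word, 7, 0));
            return 0;
    }

In the open-coded version below, the default: branch and the fault path each had to call user_access_end() by hand; with the scoped guard those calls disappear.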

Authored by Thomas Gleixner, committed by Ingo Molnar
e02718c9 e4e28fd6

+33 -42
arch/x86/include/asm/futex.h
@@ -46,38 +46,31 @@
 } while(0)
 
 static __always_inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
-                                                       u32 __user *uaddr)
+                                                        u32 __user *uaddr)
 {
-        if (can_do_masked_user_access())
-                uaddr = masked_user_access_begin(uaddr);
-        else if (!user_access_begin(uaddr, sizeof(u32)))
-                return -EFAULT;
-
-        switch (op) {
-        case FUTEX_OP_SET:
-                unsafe_atomic_op1("xchgl %0, %2", oval, uaddr, oparg, Efault);
-                break;
-        case FUTEX_OP_ADD:
-                unsafe_atomic_op1(LOCK_PREFIX "xaddl %0, %2", oval,
-                                  uaddr, oparg, Efault);
-                break;
-        case FUTEX_OP_OR:
-                unsafe_atomic_op2("orl %4, %3", oval, uaddr, oparg, Efault);
-                break;
-        case FUTEX_OP_ANDN:
-                unsafe_atomic_op2("andl %4, %3", oval, uaddr, ~oparg, Efault);
-                break;
-        case FUTEX_OP_XOR:
-                unsafe_atomic_op2("xorl %4, %3", oval, uaddr, oparg, Efault);
-                break;
-        default:
-                user_access_end();
-                return -ENOSYS;
+        scoped_user_rw_access(uaddr, Efault) {
+                switch (op) {
+                case FUTEX_OP_SET:
+                        unsafe_atomic_op1("xchgl %0, %2", oval, uaddr, oparg, Efault);
+                        break;
+                case FUTEX_OP_ADD:
+                        unsafe_atomic_op1(LOCK_PREFIX "xaddl %0, %2", oval, uaddr, oparg, Efault);
+                        break;
+                case FUTEX_OP_OR:
+                        unsafe_atomic_op2("orl %4, %3", oval, uaddr, oparg, Efault);
+                        break;
+                case FUTEX_OP_ANDN:
+                        unsafe_atomic_op2("andl %4, %3", oval, uaddr, ~oparg, Efault);
+                        break;
+                case FUTEX_OP_XOR:
+                        unsafe_atomic_op2("xorl %4, %3", oval, uaddr, oparg, Efault);
+                        break;
+                default:
+                        return -ENOSYS;
+                }
         }
-        user_access_end();
         return 0;
 Efault:
-        user_access_end();
         return -EFAULT;
 }
 
@@ -86,21 +79,19 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 {
         int ret = 0;
 
-        if (can_do_masked_user_access())
-                uaddr = masked_user_access_begin(uaddr);
-        else if (!user_access_begin(uaddr, sizeof(u32)))
-                return -EFAULT;
-        asm volatile("\n"
-                "1:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"
-                "2:\n"
-                _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %0) \
-                : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
-                : "r" (newval), "1" (oldval)
-                : "memory"
-                );
-        user_access_end();
-        *uval = oldval;
+        scoped_user_rw_access(uaddr, Efault) {
+                asm_inline volatile("\n"
+                        "1:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"
+                        "2:\n"
+                        _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %0)
+                        : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
+                        : "r" (newval), "1" (oldval)
+                        : "memory");
+                *uval = oldval;
+        }
         return ret;
+Efault:
+        return -EFAULT;
 }
 
 #endif
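[Editor's note] The semantics of the LOCK cmpxchgl sequence in the second hunk can be sketched in plain C. This is a userspace illustration, not kernel code: it uses the GCC/Clang __atomic_compare_exchange_n builtin in place of the inline asm and omits the fault handling that the extable entry provides. Like the "=a" (oldval) output operand, it always reports the value actually observed at *uaddr:

    #include <stdint.h>
    #include <stdio.h>

    static int demo_cmpxchg(uint32_t *uval, uint32_t *uaddr,
                            uint32_t oldval, uint32_t newval)
    {
            uint32_t expected = oldval;

            /* Store newval only if *uaddr still holds oldval. */
            __atomic_compare_exchange_n(uaddr, &expected, newval, 0,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
            *uval = expected;       /* value found at *uaddr either way */
            return 0;               /* kernel version returns -EFAULT on fault */
    }

    int main(void)
    {
            uint32_t word = 1, seen;

            demo_cmpxchg(&seen, &word, 1, 2);       /* match: word 1 -> 2 */
            printf("word=%u seen=%u\n", word, seen);
            demo_cmpxchg(&seen, &word, 1, 3);       /* mismatch: word stays 2 */
            printf("word=%u seen=%u\n", word, seen);
            return 0;
    }

Note how the converted kernel code no longer needs the user_access_end() call before "return ret;": leaving the scoped_user_rw_access() scope, or faulting to the Efault label, releases user access either way.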