Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'x86-uaccess-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 uaccess changes from Ingo Molnar:
"A single change that micro-optimizes __copy_*_user_inatomic(), used by
the futex code"

* 'x86-uaccess-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86: Add 1/2/4/8 byte optimization to 64bit __copy_{from,to}_user_inatomic

+18 -6
arch/x86/include/asm/uaccess_64.h
··· 49 49 copy_in_user(void __user *to, const void __user *from, unsigned len); 50 50 51 51 static __always_inline __must_check 52 - int __copy_from_user(void *dst, const void __user *src, unsigned size) 52 + int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size) 53 53 { 54 54 int ret = 0; 55 55 56 - might_fault(); 57 56 if (!__builtin_constant_p(size)) 58 57 return copy_user_generic(dst, (__force void *)src, size); 59 58 switch (size) { ··· 92 93 } 93 94 94 95 static __always_inline __must_check 95 - int __copy_to_user(void __user *dst, const void *src, unsigned size) 96 + int __copy_from_user(void *dst, const void __user *src, unsigned size) 97 + { 98 + might_fault(); 99 + return __copy_from_user_nocheck(dst, src, size); 100 + } 101 + 102 + static __always_inline __must_check 103 + int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size) 96 104 { 97 105 int ret = 0; 98 106 99 - might_fault(); 100 107 if (!__builtin_constant_p(size)) 101 108 return copy_user_generic((__force void *)dst, src, size); 102 109 switch (size) { ··· 139 134 default: 140 135 return copy_user_generic((__force void *)dst, src, size); 141 136 } 137 + } 138 + 139 + static __always_inline __must_check 140 + int __copy_to_user(void __user *dst, const void *src, unsigned size) 141 + { 142 + might_fault(); 143 + return __copy_to_user_nocheck(dst, src, size); 142 144 } 143 145 144 146 static __always_inline __must_check ··· 204 192 static __must_check __always_inline int 205 193 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size) 206 194 { 207 - return copy_user_generic(dst, (__force const void *)src, size); 195 + return __copy_from_user_nocheck(dst, (__force const void *)src, size); 208 196 } 209 197 210 198 static __must_check __always_inline int 211 199 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) 212 200 { 213 - return copy_user_generic((__force void *)dst, src, size); 201 + return __copy_to_user_nocheck((__force void *)dst, src, size); 214 202 } 215 203 216 204 extern long __copy_user_nocache(void *dst, const void __user *src,